diff --git a/META b/META
index 1537e269e5dac56df658822986288966955e0fc9..12961437fcf9bbbf3f423a14bc95af332a936f7a 100644
--- a/META
+++ b/META
@@ -3,9 +3,9 @@
   Api_revision:  0
   Major:         2
   Meta:          1
-  Micro:         0
+  Micro:         1
   Minor:         1
   Name:          slurm
   Release:       1
   Release_tags:  dist
-  Version:       2.1.0
+  Version:       2.1.1
diff --git a/NEWS b/NEWS
index c40ec0e8b17995e3f65d8fffcd8a0ccdb5109df0..6ab1d7b2a0974b6b32e6520acee21da643b575bd 100644
--- a/NEWS
+++ b/NEWS
@@ -3,6 +3,43 @@ documents those changes that are of interest to users and admins.
 
 * Changes in SLURM 2.1.1
 =============================
+ -- Fix for case sensitive databases when a slurmctld has a mixed case
+    clustername to lower case the string to ease comparisons.
+ -- Fix squeue if job is completing and failed to print remaining
+    nodes instead of failed message.
+ -- Fix sview core when searching for partitions by state.
+ -- Fixed setting the start time when querying in sacct to the
+    beginning of the day if not set previously.
+ -- Defined slurm_free_reservation_info_msg and slurm_free_topo_info_msg
+    in common/slurm_protocol_defs.h
+ -- Avoid generating error when a job step includes a memory specification and 
+    memory is not configured as a consumable resource.
+ -- Patch for small memory leak in src/common/plugstack.c
+ -- Fix sview search on node state.
+ -- Fix bug in which improperly formed job dependency specification can cause
+    slurmctld to abort.
+ -- Fixed issue where slurmctld wouldn't always get a message to send cluster
+    information when registering for the first time with the slurmdbd.
+ -- Add slurm_*_trigger.3 man pages for event trigger APIs.
+ -- Fix bug in job preemption logic that would free allocated memory twice.
+ -- Fix spelling issues (from Gennaro Oliva)
+ -- Fix issue when changing parents of an account in accounting all children
+    weren't always sent to their respective slurmctlds until a restart.
+ -- Restore support for srun/salloc/sbatch option --hint=nomultithread to 
+    bind tasks to cores rather than threads (broken in slurm v2.1.0-pre5).
+ -- Fix issue where a 2.0 sacct could not talk correctly to a 2.1 slurmdbd.
+ -- BLUEGENE - Fix issue where no partitions have any nodes assigned them to
+    alert user no blocks can be created.
+ -- BLUEGENE - Fix smap to put BGP images when using -Dc on a Blue Gene/P system
+ -- Set SLURM_SUBMIT_DIR environment variable for srun and salloc commands to
+    match behavior of sbatch command.
+ -- Report WorkDir from "scontrol show job" command for jobs launched using
+    salloc and srun.
+ -- Update correctly the wckey when changing it on a pending job.
+ -- Set wckeyid correctly in accounting when cancelling a pending job.
+ -- BLUEGENE - critical fix where jobs would be killed incorrectly.
+ -- BLUEGENE - fix for sview putting multiple ionodes on to nodelists when
+    viewing the jobs tab.
 
 * Changes in SLURM 2.1.0
 =============================
@@ -4762,4 +4799,4 @@ documents those changes that are of interest to users and admins.
  -- Change directory to /tmp in slurmd if daemonizing.
  -- Logfiles are reopened on reconfigure.
  
-$Id: NEWS 19206 2010-01-05 17:01:44Z da $
+$Id: NEWS 19293 2010-01-21 01:45:33Z da $
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index 429ca726c32b2fb32ffd14d018536d5dc13fa567..61e117ed07dc603dcd79ead9fe58b5f14d87fc42 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -27,6 +27,8 @@ HIGHLIGHTS
 * The sched/gang plugin has been removed. The logic is now directly within the
   slurmctld daemon so that gang scheduling and/or job preemption can be
   performed with a backfill scheduler.
+* Sbatch response changed from "sbatch: Submitted batch job #" written to
+  stderr to "Submitted batch job #" written to stdout.
 * Preempted jobs can now be canceled, checkpointed or requeued rather than
   only suspended.
 * Support for QOS (Quality Of Service) has been added to the accounting
@@ -34,7 +36,7 @@ HIGHLIGHTS
 * Added "--signal=<int>@<time>" option to salloc, sbatch and srun commands to
   notify programs before reaching the end of their time limit.
 * Added squeue option "--start" to report expected start time of pending jobs.
-  The times are only set if the backfill scheduler is in use.
+  The times are only set if the backfill or builtin scheduler is in use.
 * The pam_slurm Pluggable Authentication Module for SLURM previously
   distributed separately has been moved within the main SLURM distribution
   and is packaged as a separate RPM.
diff --git a/doc/html/checkpoint_blcr.shtml b/doc/html/checkpoint_blcr.shtml
index f24e319f1e89b85517a6ddfaf71a91f5aada1f12..5d9a64133dd6987286aa17b8d2fb441c80ea9990 100644
--- a/doc/html/checkpoint_blcr.shtml
+++ b/doc/html/checkpoint_blcr.shtml
@@ -16,9 +16,23 @@ Functionality provided includes:
 node failure</li>
 </ol></p>
 
-<b>Note:</b> checkpoint/blcr cannot restart interactive jobs. It can
+<p>The general mode of operation is to
+<ol>
+<li>Start the job step using the <b>srun_cr</b> command as described 
+below.</li>
+<li>Create a checkpoint of <b>srun_cr</b> using BLCR's <b>cr_checkpoint</b>
+command and cancel the job. <b>srun_cr</b> will automatically checkpoint
+your job.</li>
+<li>Restart <b>srun_cr</b> using BLCR's <b>cr_restart</b> command.
+The job will be restarted using a newly allocated jobid.</li>
+</ol>
+
+<p><b>NOTE:</b> checkpoint/blcr cannot restart interactive jobs. It can
 create checkpoints for both interactive and batch steps, but only
-batch jobs can be restarted.
+batch jobs can be restarted.</p>
+
+<p><b>NOTE:</b> BLCR operation has been verified with MVAPICH2. 
+Some other MPI implementations should also work.</p>
 
 <h2>User Commands</h2>
 
@@ -33,9 +47,8 @@ Basic familiarity with SLURM commands is assumed.</p>
 checkpoints of the job step.
 By default, the job step will have no checkpoints created.
 Acceptable time formats include "minutes", "minutes:seconds",
-"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and
-"days\-hours:minutes:seconds".
-</li>
+"hours:minutes:seconds", "days-hours", "days-hours:minutes" and
+"days-hours:minutes:seconds".</li>
 <li><b>--checkpoint-dir</b>:Specify the directory where the checkpoint image
 files of a job step will be stored.
 The default value is the current working directory.
@@ -105,8 +118,12 @@ execution of the tasks from the previous checkpoint.</p>
 
 <p>Several options have been added to support checkpoint restart:</p>
 <ul>
-<li><b>--checkpoint</b>: Specify the interval between periodic checkpoint
-of a batch job, in seconds</li>
+<li><b>--checkpoint</b>: Specifies the interval between periodic checkpoint
+of a batch job.
+By default, the job will have no checkpoints created.
+Acceptable time formats include "minutes", "minutes:seconds",
+"hours:minutes:seconds", "days-hours", "days-hours:minutes" and
+"days-hours:minutes:seconds".</li>
 <li><b>--checkpoint-dir</b>:Specify the directory when the checkpoint image
 files of a batch job will be stored.
 The default value is the current working directory.
@@ -170,6 +187,6 @@ or reading job checkpoint files</li>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 1 December 2009</p>
+<p style="text-align:center;">Last modified 7 January 2010</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/download.shtml b/doc/html/download.shtml
index 9e63f40529fd3627faefb9e5ebf86c8e5d793656..62e86616c131d2448f62917f8a6680a8e0f974db 100644
--- a/doc/html/download.shtml
+++ b/doc/html/download.shtml
@@ -129,9 +129,15 @@ Moab Cluster Suite</a></li>
 <i>contribs/perlapi</i> directory and packaged in the <i>perapi</i> RPM.</li>
 
 <li><a href="http://www.gingergeeks.co.uk/pyslurm/">PySlurm</a> is a
-Python/Pyrex module to interface with SLURM. There is also a Python module
-in the SLURM distribution to expand and collect hostlist expressions at
-<i>contribs/python/hostlist</i>.</li>
+Python/Pyrex module to interface with SLURM. 
+There is also a Python module to expand and collect hostlist expressions 
+available at <a href="http://www.nsc.liu.se/~kent/python-hostlist/">
+http://www.nsc.liu.se/~kent/python-hostlist/</a>.</li>
+
+<li><a href="http://www.lua.org/">Lua</a> may be used to implement a
+SLURM process tracking plugin.
+The Lua script available in <i>contribs/lua/protrack.lua</i> 
+implements containers using CPUSETs.</li>
 </ul><br>
 
 <li><b>SPANK Plugins</b><br>
@@ -178,6 +184,6 @@ Portable Linux Processor Affinity (PLPA)</a></li>
 
 </ul>
 
-<p style="text-align:center;">Last modified 13 October 2009</p>
+<p style="text-align:center;">Last modified 6 January 2010</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml
index 07386e9d61c7ecec146ddd2de52fcb552eadb7e0..056ea8293e849c1c5d80df6fa2a5ceedbcea6adc 100644
--- a/doc/html/faq.shtml
+++ b/doc/html/faq.shtml
@@ -40,6 +40,8 @@
   (e.g. place it into a <i>hold</i> state)?</a></li>
 <li><a href="#mem_limit">Why are jobs not getting the appropriate
   memory limit?</a></li>
+<li><a href="#mailing_list">Is an archive available of messages posted to 
+the <i>slurm-dev</i> mailing list?</a></li>
 </ol>
 
 <h2>For Administrators</h2>
@@ -624,6 +626,11 @@ problem described above.
 Use the same solution for the AS (Address Space), RSS (Resident Set Size),
 or other limits as needed.</p>
 
+<p><a name="mailing_list"><b>23. Is an archive available of messages posted to 
+the <i>slurm-dev</i> mailing list?</b></a><br>
+Yes, it is at <a href="http://groups.google.com/group/slurm-devel">
+http://groups.google.com/group/slurm-devel</a></p>
+
 <p class="footer"><a href="#top">top</a></p>
 
 
@@ -1239,6 +1246,6 @@ $ squeue -tpd -h -o "scontrol update jobid=%i priority=1000" >my.script
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 11 September 2009</p>
+<p style="text-align:center;">Last modified 5 January 2010</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/mpi_guide.shtml b/doc/html/mpi_guide.shtml
index ed4538f0735f3e9a8faa33c275fee69b675ae476..f0699cb55053004fb416c20c2eb530aca4f43882 100644
--- a/doc/html/mpi_guide.shtml
+++ b/doc/html/mpi_guide.shtml
@@ -159,6 +159,9 @@ $ srun -n20 a.out
 library integrated with SLURM</li>
 <li>Set the environment variable <b>PMI_DEBUG</b> to a numeric value
 of 1 or higher for the PMI library to print debugging information</li>
+<li>Information about building MPICH2 for use with SLURM is described on the 
+<a href="http://wiki.mcs.anl.gov/mpich2/index.php/Frequently_Asked_Questions#Q:_How_do_I_use_MPICH2_with_slurm.3F">
+MPICH2 FAQ</a> web page</li>
 </ul></p>
 <hr size=4 width="100%">
 
@@ -302,6 +305,6 @@ sbatch: Submitted batch job 1234
 tasks. These tasks are not managed by SLURM since they are launched
 outside of its control.</p>
 
-<p style="text-align:center;">Last modified 2 March 2009</p>
+<p style="text-align:center;">Last modified 20 January 2010</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/news.shtml b/doc/html/news.shtml
index 7d90c4de87f8d8160521eebd668292d2a562913e..47f40e7c244ede954b25dda8e7a54d79f3399ec7 100644
--- a/doc/html/news.shtml
+++ b/doc/html/news.shtml
@@ -69,6 +69,8 @@ is now available.</li>
 <p>SLURM Version 2.1 was released in January 2010.
 Major enhancements include:
 <ul>
+<li>Optimized resource allocation based upon network topology (e.g.
+hierarchical switches).</li>
 <li>Support for job preemption based upon job Quality of Service (QOS) in
 addition to queue priority.</li>
 <li>Support for time limits on individual job steps (in addition to the
@@ -84,8 +86,6 @@ not been finalized. Anyone desiring to perform SLURM development should notify
 <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>
 to coordinate activities. Future development plans includes:
 <ul>
-<li>Optimized resource allocation based upon network topology (e.g.
-hierarchical switches).</li>
 <li>Modify more SLURM commands to operate between clusters.</li>
 <li>Support for BlueGene/Q systems.</li>
 <li>Permit resource allocations (jobs) to change size.</li>
diff --git a/doc/html/programmer_guide.shtml b/doc/html/programmer_guide.shtml
index bca0780524a9057ba6dc110a724dc74df4b4e3fa..ba3d1e24670f2e17d9cb1c5241f5ca4ada482e5f 100644
--- a/doc/html/programmer_guide.shtml
+++ b/doc/html/programmer_guide.shtml
@@ -99,7 +99,7 @@ SLURM.<br>
 <b>plugins</b>&#151;Plugin functions for various infrastructures or optional
 behavior. A separate subdirectory is used for each plugin class:<br>
 <ul>
-<li><b>accounting_storage</b> for specifing the type of storage for accounting,<br>
+<li><b>accounting_storage</b> for specifying the type of storage for accounting,<br>
 <li><b>auth</b> for user authentication,<br>
 <li><b>checkpoint</b> for system-initiated checkpoint and restart of user jobs,<br>
 <li><b>crypto</b> for cryptographic functions,<br>
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index e33fbe2b389ae913b710d51519a228e75845aa4b..3e229630d173a9cfdb5c842f1ba24ab035700e7e 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -33,6 +33,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_allocation_msg_thr_create.3 \
 	man3/slurm_allocation_msg_thr_destroy.3 \
 	man3/slurm_api_version.3 \
+	man3/slurm_checkpoint.3 \
 	man3/slurm_checkpoint_able.3 \
 	man3/slurm_checkpoint_complete.3 \
 	man3/slurm_checkpoint_create.3 \
diff --git a/doc/man/Makefile.in b/doc/man/Makefile.in
index c900787672d143358e29e51b7f7e561511aca895..e410235f97f51edf23b1640085ce42396bf874f1 100644
--- a/doc/man/Makefile.in
+++ b/doc/man/Makefile.in
@@ -326,6 +326,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_allocation_msg_thr_create.3 \
 	man3/slurm_allocation_msg_thr_destroy.3 \
 	man3/slurm_api_version.3 \
+	man3/slurm_checkpoint.3 \
 	man3/slurm_checkpoint_able.3 \
 	man3/slurm_checkpoint_complete.3 \
 	man3/slurm_checkpoint_create.3 \
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index f2904adacb0943f22cb2915a32bcdb5befb6b9ed..9d8f219b6aabfccf710a8f8a558cd81ef6d38a7f 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -238,7 +238,7 @@ a ranged string.
 
 .TP
 \f3\-o \fP\f3,\fP \f3\-\-format\fP
-Comma seperated list of fields. (use "\-\-helpformat" for a list of
+Comma separated list of fields. (use "\-\-helpformat" for a list of
 available fields).
 
 NOTE: When using the format option for listing various fields you can put a
@@ -265,7 +265,7 @@ output will be '|' delimited without a '|' at the end
 .TP
 \f3\-r \fP\f3,\fP \f3\-\-partition\fP
 
-Comma seperated list of partitions to select jobs and job steps
+Comma separated list of partitions to select jobs and job steps
 from. The default is all partitions.
 
 .TP
@@ -329,7 +329,7 @@ would be truncated to \-\-starttime.  The same for end time and \-\-endtime.
 
 .TP
 \f3\-u \fP\f2uid_list\fP\f3,\fP  \f3\-\-uid\fP\f3=\fP\f2uid_list\fP \f3\-\-user\fP\f3=\fP\f2user_list\fP
-Use this comma seperated list of uids or user names to select jobs to display.  By default, the running
+Use this comma separated list of uids or user names to select jobs to display.  By default, the running
 user's uid is used.
 
 .TP
@@ -486,7 +486,12 @@ It is in the form:
 
 .TP
 \f3jobname\fP
-The name of the job or job step.
+The name of the job or job step. The \f3slurm_accounting.log\fP file
+is a space delimited file. Because of this if a space is used in the 
+jobname an underscore is substituted for the space before the record 
+is written to the accounting file. So when the jobname is displayed 
+by sacct the jobname that had a space in it will now have an underscore 
+in place of the space.
 
 .TP
 \f3layout\fP
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index 158e514e39c63e345089c4ada7ff707f80a326a9..b5d817e3aa056d7ffa75d93bc6341859eea69d54 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -113,7 +113,7 @@ Display a description of sacctmgr options and commands.
 \fBlist\fR <\fIENTITY\fR> [<\fISPECS\fR>]
 Display information about the specified entity.
 By default, all entries are displayed, you can narrow results by
-specifing SPECS in your query.
+specifying SPECS in your query.
 Identical to the \fBshow\fR command.
 
 .TP
@@ -141,7 +141,7 @@ Identical to the \fBexit\fR command.
 \fBshow\fR <\fIENTITY\fR> [<\fISPECS\fR>]
 Display information about the specified entity.
 By default, all entries are displayed, you can narrow results by
-specifing SPECS in your query.
+specifying SPECS in your query.
 Identical to the \fBlist\fR command.
 
 .TP
@@ -1350,7 +1350,7 @@ this fashion.
 
 .br
 If you are looking to only add the qos expedite to only a certain
-account and or cluster you can do that by specifing them in the
+account and or cluster you can do that by specifying them in the
 sacctmgr line.
 .br
 
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 86f0c3181548dc403c27a1cae05340ded6b483dd..4536bab0e0a0d662e2a32e4d11b2f055a02f763b 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -698,8 +698,18 @@ Silence salloc's use of the terminal bell. Also see the option \fB\-\-bell\fR.
 
 .TP
 \fB\-\-no\-shell\fR
-immediately exit after allocating resources instead of spawning a
-shell when used with the \fB\-A\fR, \fB\-\-allocate\fR option.
+immediately exit after allocating resources, without running a
+command. However, the SLURM job will still be created and will remain
+active and will own the allocated resources as long as it is active.
+You will have a SLURM job id with no associated processes or
+tasks. You can submit \fBsrun\fR commands against this resource allocation,
+if you specify the \fB\-\-jobid=\fR option with the job id of this SLURM job.
+Or, this can be used to temporarily reserve a set of resources so that
+other jobs cannot use them for some period of time.  (Note that the
+SLURM job is subject to the normal constraints on jobs, including time
+limits, so that eventually the job will terminate and the resources
+will be freed, or you can terminate the job manually using the
+\fBscancel\fR command.) 
 
 .TP
 \fB\-O\fR, \fB\-\-overcommit\fR
@@ -987,6 +997,9 @@ Total number of nodes in the job allocation.
 \fBSLURM_MEM_BIND\fR
 Set to value of the \-\-mem_bind\fR option.
 .TP
+\fBSLURM_SUBMIT_DIR\fR
+The directory from which \fBsalloc\fR was invoked.
+.TP
 \fBSLURM_NTASKS_PER_NODE\fR
 Set to value of the \-\-ntasks\-per\-node\fR option, if specified.
 .TP
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 54d493621a20cb18ee8346ab1a7599de7dc8556d..96e3172cf310bd9970ada91ec2ada377a8a3bca9 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -95,7 +95,6 @@ For example:
    \-\-begin=now+60           (seconds by default)
    \-\-begin=2010\-01\-20T12:34:00
 .fi
-
 .RS
 .PP
 Notes on date/time specifications:
diff --git a/doc/man/man1/sbcast.1 b/doc/man/man1/sbcast.1
index 20d8bf0835fd6cdb918029fa04bd5af087a1d041..ca9990c553e825bc89d562c3b23f89c03007f910 100644
--- a/doc/man/man1/sbcast.1
+++ b/doc/man/man1/sbcast.1
@@ -93,7 +93,7 @@ Using a batch script, transmit local file \fBmy.prog\fR to
 sbcast my.prog /tmp/my.prog
 srun /tmp/my.prog
 
-> srun \-\-nodes=8 \-\-batch my.job
+> sbatch \-\-nodes=8 my.job
 srun: jobid 12345 submitted
 .fi
 
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index 6bd8cc5d94be21fb2dfcb943c8fe8c2e57f02940..d04dcdfc40f07c8126dc30e69c16619b9e5da0e1 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -334,9 +334,36 @@ Possible values on Blue Gene are "MESH", "TORUS" and "NAV"
 Set the job's requirement for contiguous (consecutive) nodes to be allocated.
 Possible values are "YES" and "NO".
 .TP
-\fIDependency\fP=<job_id>
-Defer job's initiation until specified job_id completes.
-Cancel dependency with job_id value of "0", "Dependency=0".
+\fIDependency\fP=<dependency_list>
+Defer job's initiation until specified job dependency specification 
+is satisfied.
+Cancel dependency with an empty dependency_list (e.g. "Dependency=").
+<\fIdependency_list\fR> is of the form
+<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
+Many jobs can share the same dependency and these jobs may even belong to
+different  users. 
+.PD
+.RS
+.TP
+\fBafter:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have begun
+execution.
+.TP
+\fBafterany:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have terminated.
+.TP
+\fBafternotok:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have terminated
+in some failed state (non-zero exit code, node failure, timed out, etc).
+.TP
+\fBafterok:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have successfully
+executed (ran to completion with a zero exit code).
+.TP
+\fBsingleton\fR
+This job can begin execution after any previously launched jobs
+sharing the same job name and user have terminated.
+.RE
 .TP
 \fIExcNodeList\fP=<nodes>
 Set the job's list of excluded node. Multiple node names may be
@@ -476,36 +503,47 @@ described above.  However, the following fields displayed by the show
 job command are read\-only and cannot be modified:
 
 .TP
-\fIAllocNode:Sid\fp
+\fIAllocNode:Sid\fP
 Local node and system id making the resource allocation.
 .TP
-\fIEndTime\fp
+\fIEndTime\fP
 The time the job is expected to terminate based on the job's time
 limit.  When the job ends sooner, this field will be updated with the
 actual end time.
 .TP
-\fIJobState\fp
+\fIJobState\fP
 The current state of the job.
 .TP
-\fINodeList\fp
+\fINodeList\fP
 The list of nodes allocated to the job.
 .TP
-\fINodeListIndices\fp
+\fINodeListIndices\fP
 The NodeIndices expose the internal indices into the node table
 associated with the node(s) allocated to the job.
 .TP
-\fIPreSusTime\fp
+\fIPreSusTime\fP
 Time the job ran prior to last suspend.
 .TP
-\fIReason\fp
+\fIReason\fP
 The reason job is not running: e.g., waiting "Resources".
 .TP
-\fISuspendTime\fp
+\fISuspendTime\fP
 Time the job was last suspended or resumed.
 .TP
-\fIUserId\fp  \fIGroupId\fp
+\fIUserId\fP  \fIGroupId\fP
 The user and group under which the job was submitted.
 .TP
+NOTE on information displayed for various job states: 
+When you submit a request for the "show job" function the scontrol
+process makes an RPC request call to slurmctld with a REQUEST_JOB_INFO
+message type.  If the state of the job is PENDING, then it returns
+some detail information such as: min_nodes, min_procs, cpus_per_task,
+etc. If the state is other than PENDING the code assumes that it is in
+a further state such as RUNNING, COMPLETE, etc. In these cases the
+code explicitly returns zero for these values. These values are
+meaningless once the job resources have been allocated and the job has
+started.
+.TP
 \fBSPECIFICATIONS FOR UPDATE COMMAND, NODES\fR
 .TP
 \fINodeName\fP=<name>
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 16941e509f9fcae50b14db4685231c0990f7bda4..0b0123c418cd3befb8d0aaffb3a6f296c78263ac 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -88,8 +88,8 @@ is the minimum field size.
 If no size is specified, whatever is needed to print the information will be used.
 .TP
 \fI .\fR
-indicates the output should be left justified.
-By default, output is right justified.
+indicates the output should be right justified and size must be specified.
+By default, output is left justified.
 .RE
 
 .IP
@@ -483,7 +483,7 @@ Print the job steps in the debug partition sorted by user:
 .br
 # squeue -s -p debug -S u
 .br
-  STEPID        NAME PARTITION     USER TIME_USED NODELIST(REASON)
+  STEPID        NAME PARTITION     USER      TIME NODELIST
 .br
  65552.1       test1     debug    alice      0:23 dev[1-4]
 .br
@@ -497,13 +497,13 @@ Print information only about jobs 12345,12345, and 12348:
 .br
 # squeue --jobs 12345,12346,12348
 .br
- JOBID PARTITION NAME USER ST TIME_USED NODES NODELIST(REASON)
+ JOBID PARTITION NAME USER ST  TIME  NODES NODELIST(REASON)
 .br
- 12345     debug job1 dave  R      0:21     4 dev[9-12]
+ 12345     debug job1 dave  R   0:21     4 dev[9-12]
 .br
- 12346     debug job2 dave PD      0:00     8 (Resources)
+ 12346     debug job2 dave PD   0:00     8 (Resources)
 .br
- 12348     debug job3 ed   PD      0:00     4 (Priority)
+ 12348     debug job3 ed   PD   0:00     4 (Priority)
 .ec
 
 .eo
@@ -511,9 +511,9 @@ Print information only about job step 65552.1:
 .br
 # squeue --steps 65552.1
 .br
-  STEPID     NAME PARTITION    USER    TIME_USED NODELIST(REASON)
+  STEPID     NAME PARTITION    USER    TIME  NODELIST
 .br
- 65552.1    test2     debug   alice        12:49 dev[1-4]
+ 65552.1    test2     debug   alice   12:49  dev[1-4]
 .ec
 
 .SH "COPYING"
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 394dc922365165008b522e117c314b0773af83c9..8de02bb3fba444060ba568e5b687714204af0a15 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -440,7 +440,11 @@ a terminal is not possible.
 \fB\-J\fR, \fB\-\-job\-name\fR=<\fIjobname\fR>
 Specify a name for the job. The specified name will appear along with
 the job id number when querying running jobs on the system. The default
-is the supplied \fBexecutable\fR program's name.
+is the supplied \fBexecutable\fR program's name. NOTE: This information 
+may be written to the slurm_jobacct.log file. This file is space delimited
+so if a space is used in the \fIjobname\fR name it will cause problems in 
+properly displaying the contents of the slurm_jobacct.log file when the 
+\fBsacct\fR command is used.
 
 .TP
 \fB\-\-jobid\fR=<\fIjobid\fR>
@@ -518,12 +522,14 @@ and
 https://computing.llnl.gov/linux/slurm/dist_plane.html.
 .TP
 .B arbitrary
-The arbitrary method of distribution will allocate tasks in\-order as
-listed in file designated by the environment variable SLURM_HOSTFILE.  If
-this variable is listed it will over ride any other method specified.
-If not set the method will default to block.  Inside the hostfile must
-contain at minimum the number of hosts requested.  If requesting tasks
-(\-n) your tasks will be laid out on the nodes in the order of the file.
+The arbitrary method of distribution will allocate processes in\-order
+as listed in file designated by the environment variable
+SLURM_HOSTFILE.  If this variable is listed it will over ride any
+other method specified.  If not set the method will default to block.
+Inside the hostfile must contain at minimum the number of hosts
+requested and be one per line or comma separated.  If specifying a
+task count (\fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>), your tasks 
+will be laid out on the nodes in the order of the file.
 .RE
 
 .TP
@@ -784,10 +790,13 @@ allocation as one step in a larger job script.
 
 .TP
 \fB\-O\fR, \fB\-\-overcommit\fR
-Overcommit resources.  Normally, \fBsrun\fR will allocate one task
-per processor.  By specifying \fB\-\-overcommit\fR you are explicitly
-allowing more than one task per processor.  However no more than
-\fBMAX_TASKS_PER_NODE\fR tasks are permitted to execute per node.
+Overcommit resources. Normally, \fBsrun\fR
+will not allocate more than one process per CPU. By specifying
+\fB\-\-overcommit\fR you are explicitly allowing more than one process
+per CPU. However no more than \fBMAX_TASKS_PER_NODE\fR tasks are 
+permitted to execute per node.  NOTE: \fBMAX_TASKS_PER_NODE\fR is 
+defined in the file \fIslurm.h\fR and is not a variable, it is set at 
+SLURM build time.
 
 .TP
 \fB\-o\fR, \fB\-\-output\fR=<\fImode\fR>
@@ -948,9 +957,12 @@ the job. By default only errors are displayed.
 
 .TP
 \fB\-T\fR, \fB\-\-threads\fR=<\fInthreads\fR>
-Request that \fBsrun\fR
-use \fInthreads\fR to initiate and control the parallel job. The
-default value is the smaller of 60 or the number of nodes allocated.
+Allows limiting the number of concurrent threads used to
+send the job request from the srun process to the slurmd
+processes on the allocated nodes. Default is to use one
+thread per allocated node up to a maximum of 60 concurrent
+threads. Specifying this option limits the number of
+concurrent threads to \fInthreads\fR (less than or equal to 60).
 This should only be used to set a low thread count for testing on
 very small memory computers.
 
@@ -1062,6 +1074,14 @@ Request that a specific list of hosts not be included in the resources
 allocated to this job. The host list will be assumed to be a filename
 if it contains a "/"character.
 
+.TP
+\fB\-Z\fR, \fB\-\-no\-allocate\fR
+Run the specified tasks on a set of nodes without creating a SLURM
+"job" in the SLURM queue structure, bypassing the normal resource
+allocation step.  The list of nodes must be specified with the 
+\fB\-w\fR, \fB\-\-nodelist\fR option.  This is a privileged option 
+only available for the users "SlurmUser" and "root".
+
 .PP
 The following options support Blue Gene systems, but may be
 applicable to other systems as well.
@@ -1543,6 +1563,9 @@ The MPI rank (or relative process ID) of the current process
 \fBSLURM_STEPID\fR
 The step ID of the current job
 .TP
+\fBSLURM_SUBMIT_DIR\fR
+The directory from which \fBsrun\fR was invoked.
+.TP
 \fBSLURM_TASK_PID\fR
 The process ID of the task being started.
 .TP
@@ -1826,7 +1849,7 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBsalloc\fR(1), \fBsttach\fR(1), \fBsbatch\fR(1), \fBsbcast\fR(1),
+\fBsalloc\fR(1), \fBsattach\fR(1), \fBsbatch\fR(1), \fBsbcast\fR(1),
 \fBscancel\fR(1), \fBscontrol\fR(1), \fBsqueue\fR(1), \fBslurm.conf\fR(5),
 \fBsched_setaffinity\fR(2), \fBnuma\fR(3)
 \fBgetrlimit\fR(2),
diff --git a/doc/man/man1/sstat.1 b/doc/man/man1/sstat.1
index 3501dcdfe2cfd0ec4ca373218d5c78623f13e141..45c5031116eb32552234787f6fe9290515af67a3 100644
--- a/doc/man/man1/sstat.1
+++ b/doc/man/man1/sstat.1
@@ -49,7 +49,7 @@ Displays a general help message.
 Format is <job(.step)>. Stat this job step or comma-separated list of
 job steps. This option is required.  The step portion will default to
 step 0 if not specified, unless the \-\-allsteps flag is set where not
-specifing a step will result in all running steps to be displayed.
+specifying a step will result in all running steps to be displayed.
 
 .TP
 \f3\-n \fP\f3,\fP \f3\-\-noheader\fP
@@ -57,7 +57,7 @@ No header will be added to the beginning of output. The default is to print a he
 
 .TP
 \f3\-o \fP\f3,\fP \f3\-\-format\fP,\fP \f3\-\-fields\fP
-Comma seperated list of fields.
+Comma separated list of fields.
 (use '\-\-helpformat' for a list of available fields).
 
 .TP
diff --git a/doc/man/man3/slurm_checkpoint.3 b/doc/man/man3/slurm_checkpoint.3
new file mode 100644
index 0000000000000000000000000000000000000000..32120a6bbb2293314b111f8b985f85a5cb4f7347
--- /dev/null
+++ b/doc/man/man3/slurm_checkpoint.3
@@ -0,0 +1 @@
+.so man3/slurm_checkpoint_error.3
diff --git a/doc/man/man3/slurm_clear_trigger.3 b/doc/man/man3/slurm_clear_trigger.3
index 2fd720318c89545d753f5eea5d6b54846f26f671..92435e3598d830d76e7eec822155f14f75ce8ba5 100644
--- a/doc/man/man3/slurm_clear_trigger.3
+++ b/doc/man/man3/slurm_clear_trigger.3
@@ -1 +1,111 @@
-.so man3/slurm_trigger.3
+.TH "Slurm API" "3" "January 2010" "Morris Jette" "Slurm event trigger management functions"
+
+.SH "NAME"
+
+slurm_clear_trigger, slurm_free_trigger_msg, 
+slurm_get_triggers, slurm_set_trigger \- Slurm event trigger management functions
+
+.SH "SYNTAX"
+.LP
+#include <slurm/slurm.h>
+.LP
+.LP
+int \fBslurm_set_trigger\fR (
+.br
+	trigger_info_t *\fItrigger_info\fP
+.br
+);
+.LP
+int \fBslurm_clear_trigger\fR (
+.br
+	trigger_info_t *\fItrigger_info\fP
+.br
+);
+.LP
+int \fBslurm_get_triggers\fR (
+.br
+	trigger_info_msg_t **\fItrigger_info_msg\fP
+.br
+);
+.LP
+int \fBslurm_free_trigger_msg\fR (
+.br
+	trigger_info_msg_t *\fItrigger_info_msg\fP
+.br
+);
+
+.SH "ARGUMENTS"
+.LP
+.TP
+\fItrigger_info\fP
+Information about one event trigger including trigger ID, type, time offset, etc.
+See \fIslurm.h\fP for details.
+.TP
+\fItrigger_info_msg\fP
+A data structure including an array of \fItrigger_info\fP structures 
+plus their count.
+See \fIslurm.h\fP for details.
+
+.SH "DESCRIPTION"
+.LP
+\fBslurm_set_trigger\fR Create a new event trigger.
+Note that any trigger ID specified in \fItrigger_info\fP is unused.
+.LP
+\fBslurm_clear_trigger\fR Clear or remove existing event triggers.
+If a trigger ID is specified then only that one trigger will be cleared. 
+If a job ID or node name is specified, then all triggers associated with 
+that resource are cleared. 
+.LP
+\fBslurm_get_triggers\fR Get information about all currently configured
+event triggers. To avoid memory leaks, always follow this with a call
+to the \fBslurm_free_trigger_msg\fR function.
+.LP
+\fBslurm_free_trigger_msg\fR Release the memory allocated for the array 
+returned by the \fBslurm_get_triggers\fR function.
+
+.SH "RETURN VALUE"
+.LP
+\fBSLURM_SUCCESS\fR is returned on successful completion, 
+otherwise an error code is returned as described below.
+
+.SH "ERRORS"
+.LP
+\fBEINVAL\fR Invalid argument
+.LP
+\fBESLURM_ACCESS_DENIED\fR Attempt by non\-privileged user to set an 
+event trigger.
+.LP
+\fBESLURM_ALREADY_DONE\fR Attempt to set an event trigger for a job which 
+has already completed.
+.LP
+\fBESLURM_INVALID_NODE_NAME\fR Attempt to set an event trigger for a node 
+name which is invalid.
+.LP
+\fBESLURM_INVALID_JOB_ID\fR the specified job id does not exist.
+
+.SH "NOTE"
+These functions are included in the libslurm library,
+which must be linked to your process for use
+(e.g. "cc \-lslurm myprog.c").
+
+.SH "COPYING"
+Copyright (C) 2010 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+CODE\-OCEC\-09\-009. All rights reserved.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+.SH "SEE ALSO"
+.LP
+\fBstrigger\fR(1),
+\fBslurm_get_errno\fR(3), \fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
diff --git a/doc/man/man3/slurm_free_trigger_msg.3 b/doc/man/man3/slurm_free_trigger_msg.3
index 2fd720318c89545d753f5eea5d6b54846f26f671..edfff17d82e01f5eaec81d8f169b4b31cac87b6a 100644
--- a/doc/man/man3/slurm_free_trigger_msg.3
+++ b/doc/man/man3/slurm_free_trigger_msg.3
@@ -1 +1 @@
-.so man3/slurm_trigger.3
+.so man3/slurm_clear_trigger.3
diff --git a/doc/man/man3/slurm_get_triggers.3 b/doc/man/man3/slurm_get_triggers.3
index 2fd720318c89545d753f5eea5d6b54846f26f671..edfff17d82e01f5eaec81d8f169b4b31cac87b6a 100644
--- a/doc/man/man3/slurm_get_triggers.3
+++ b/doc/man/man3/slurm_get_triggers.3
@@ -1 +1 @@
-.so man3/slurm_trigger.3
+.so man3/slurm_clear_trigger.3
diff --git a/doc/man/man3/slurm_set_trigger.3 b/doc/man/man3/slurm_set_trigger.3
index 2fd720318c89545d753f5eea5d6b54846f26f671..edfff17d82e01f5eaec81d8f169b4b31cac87b6a 100644
--- a/doc/man/man3/slurm_set_trigger.3
+++ b/doc/man/man3/slurm_set_trigger.3
@@ -1 +1 @@
-.so man3/slurm_trigger.3
+.so man3/slurm_clear_trigger.3
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index 99209ebf826de12d2ebd7d8eaac42b1863f00d38..65c24aedcf079b99ef049b8e46b960635201a632 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -34,6 +34,17 @@ If a line begins with the word "Include" followed by whitespace
 and then a file name, that file will be included inline with the current
 configuration file.
 .LP
+Note on file permissions:
+.LP
+The \fIslurm.conf\fR file must be readable by all users of SLURM, since it
+is used by many of the SLURM commands.  Other files that are defined
+in the \fIslurm.conf\fR file, such as log files and job accounting files,
+may need to be created/owned by the "SlurmUser" uid to be successfully
+accessed.  Use the "chown" and "chmod" commands to set the ownership
+and permissions appropriately.
+
+.SH "PARAMETERS"
+.LP
 The overall configuration parameters available include:
 
 .TP
@@ -238,7 +249,7 @@ By default the \fBControlAddr\fR will be identical in value to
 .TP
 \fBControlMachine\fR
 The short hostname of the machine where SLURM control functions are
-executed (i.e. the name returned by the command "hostname -s", use
+executed (i.e. the name returned by the command "hostname \-s", use
 "tux001" rather than "tux001.my.com").
 This value must be specified.
 In order to support some high availability architectures, multiple
@@ -1471,17 +1482,25 @@ The value may not exceed 65533 seconds.
 
 .TP
 \fBSrunEpilog\fR
-Fully qualified pathname of an executable to be run by srun following the
-completion of a job step.  The command line arguments for the executable will
-be the command and arguments of the job step.  This configuration parameter
-may be overridden by srun's \fB\-\-epilog\fR parameter.
+Fully qualified pathname of an executable to be run by srun following
+the completion of a job step.  The command line arguments for the
+executable will be the command and arguments of the job step.  This
+configuration parameter may be overridden by srun's \fB\-\-epilog\fR
+parameter. Note that while the other "Epilog" executables (e.g.,
+TaskEpilog) are run by slurmd on the compute nodes where the tasks are
+executed, the \fBSrunEpilog\fR runs on the node where the "srun" is
+executing.
 
 .TP
 \fBSrunProlog\fR
-Fully qualified pathname of an executable to be run by srun prior to the
-launch of a job step.  The command line arguments for the executable will
-be the command and arguments of the job step.  This configuration parameter
-may be overridden by srun's \fB\-\-prolog\fR parameter.
+Fully qualified pathname of an executable to be run by srun prior to
+the launch of a job step.  The command line arguments for the
+executable will be the command and arguments of the job step.  This
+configuration parameter may be overridden by srun's \fB\-\-prolog\fR
+parameter. Note that while the other "Prolog" executables (e.g.,
+TaskProlog) are run by slurmd on the compute nodes where the tasks are
+executed, the \fBSrunProlog\fR runs on the node where the "srun" is
+executing.
 
 .TP
 \fBStateSaveLocation\fR
diff --git a/doc/man/man5/wiki.conf.5 b/doc/man/man5/wiki.conf.5
index e6f45689e3c143d980e9db037ceda4d974df1fd3..7248ba70f2cd3ed811b4574d6a8114782a1ace7e 100644
--- a/doc/man/man5/wiki.conf.5
+++ b/doc/man/man5/wiki.conf.5
@@ -87,6 +87,8 @@ separator between their names.
 \fBHostFormat\fR
 Controls the format of host lists exchanged between SLURM and Moab.
 The default value is "0".
+Not applicable to wiki plugin, only the wiki2 plugin.
+
 .RS
 .TP
 \fB0\fR
diff --git a/slurm.spec b/slurm.spec
index 086a623a039247c92284b45dcc8c01ad8ef96cf8..6597db3d4b1ea7db87d09ec004cf7fcfcff0fa43 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -83,14 +83,14 @@
 %endif
 
 Name:    slurm
-Version: 2.1.0
+Version: 2.1.1
 Release: 1%{?dist}
 
 Summary: Simple Linux Utility for Resource Management
 
 License: GPL
 Group: System Environment/Base
-Source: slurm-2.1.0.tar.bz2
+Source: slurm-2.1.1.tar.bz2
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
 URL: https://computing.llnl.gov/linux/slurm/
 
@@ -350,7 +350,7 @@ Gives the ability for SLURM to use Berkeley Lab Checkpoint/Restart
 #############################################################################
 
 %prep
-%setup -n slurm-2.1.0
+%setup -n slurm-2.1.1
 
 %build
 %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \
diff --git a/src/api/allocate.c b/src/api/allocate.c
index 4bc8c72cb6783c4aaf952cc705f9fc1e3041986b..9cafa1eb12d15b43df61a521712d720befceb145 100644
--- a/src/api/allocate.c
+++ b/src/api/allocate.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  allocate.c - allocate nodes for a job or step with supplied contraints
- *  $Id: allocate.c 19095 2009-12-01 22:59:18Z da $
+ *  $Id: allocate.c 19271 2010-01-19 21:00:56Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
@@ -271,7 +271,7 @@ slurm_allocate_resources_blocking (const job_desc_msg_t *user_req,
 		break;
 	default:
 		errnum = SLURM_UNEXPECTED_MSG_ERROR;
-		return NULL;
+		resp = NULL;
 	}
 
 	destroy_forward(&req_msg.forward);
@@ -294,16 +294,25 @@ int slurm_job_will_run (job_desc_msg_t *req)
 	slurm_msg_t req_msg, resp_msg;
 	will_run_response_msg_t *will_run_resp;
 	char buf[64];
+	bool host_set = false;
+	int rc;
 
 	/* req.immediate = true;    implicit */
 	if ((req->alloc_node == NULL) &&
-	    (gethostname_short(buf, sizeof(buf)) == 0))
+	    (gethostname_short(buf, sizeof(buf)) == 0)) {
 		req->alloc_node = buf;
+		host_set = true;
+	}
 	slurm_msg_t_init(&req_msg);
 	req_msg.msg_type = REQUEST_JOB_WILL_RUN;
 	req_msg.data     = req;
 
-	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+	rc = slurm_send_recv_controller_msg(&req_msg, &resp_msg);
+
+	if (host_set)
+		req->alloc_node = NULL;
+
+	if (rc < 0)
 		return SLURM_SOCKET_ERROR;
 
 	switch (resp_msg.msg_type) {
@@ -557,15 +566,17 @@ char *slurm_read_hostfile(char *filename, int n)
 	if (filename == NULL || strlen(filename) == 0)
 		return NULL;
 
-	if((fp = fopen(filename, "r")) == NULL) {
+	if ((fp = fopen(filename, "r")) == NULL) {
 		error("slurm_allocate_resources error opening file %s, %m",
 		      filename);
 		return NULL;
 	}
 
 	hostlist = hostlist_create(NULL);
-	if (hostlist == NULL)
+	if (hostlist == NULL) {
+		fclose(fp);
 		return NULL;
+	}
 
 	while (fgets(in_line, BUFFER_SIZE, fp) != NULL) {
 		line_num++;
@@ -574,6 +585,7 @@ char *slurm_read_hostfile(char *filename, int n)
 			error ("Line %d, of hostfile %s too long",
 			       line_num, filename);
 			fclose (fp);
+			hostlist_destroy(hostlist);
 			return NULL;
 		}
 
@@ -598,7 +610,7 @@ char *slurm_read_hostfile(char *filename, int n)
 		}
 
 		hostlist_push(hostlist, in_line);
-		if(n != (int)NO_VAL && hostlist_count(hostlist) == n)
+		if (n != (int)NO_VAL && hostlist_count(hostlist) == n)
 			break;
 	}
 	fclose(fp);
@@ -743,6 +755,7 @@ _accept_msg_connection(int listen_fd,
 		}
 
 		error("_accept_msg_connection[%s]: %m", host);
+		slurm_close_accepted_conn(conn_fd);
 		return SLURM_ERROR;
 	}
 
diff --git a/src/api/allocate_msg.c b/src/api/allocate_msg.c
index a8f602eb11c8821f0e21876657c76ee24bfa38b4..3c98215c434f17224caf64c8f4d39841f59d5864 100644
--- a/src/api/allocate_msg.c
+++ b/src/api/allocate_msg.c
@@ -130,6 +130,11 @@ extern allocation_msg_thread_t *slurm_allocation_msg_thr_create(
 	obj = eio_obj_create(sock, &message_socket_ops, (void *)msg_thr);
 
 	msg_thr->handle = eio_handle_create();
+	if (!msg_thr->handle) {
+		error("failed to create eio handle");
+		xfree(msg_thr);
+		return NULL;
+	}
 	eio_new_initial_obj(msg_thr->handle, obj);
 	pthread_mutex_lock(&msg_thr_start_lock);
 	slurm_attr_init(&attr);
diff --git a/src/api/job_info.c b/src/api/job_info.c
index ffc80522b1fbc135ae0d2988903b3168c1b4d416..5a85d69013643be5c4ccf02979fbf3e2bd58406f 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -386,6 +386,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 	hl_last = hostlist_create(NULL);
 	if (!hl_last) {
 		error("slurm_sprint_job_info: hostlist_create: NULL");
+		hostlist_destroy(hl);
 		return NULL;
 	}
 
@@ -411,6 +412,8 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 		core_bitmap = bit_alloc(bit_reps);
 		if (core_bitmap == NULL) {
 			error("bit_alloc malloc failure");
+			hostlist_destroy(hl_last);
+			hostlist_destroy(hl);
 			return NULL;
 		}
 
@@ -572,7 +575,7 @@ line13:
 		 job_ptr->contiguous, job_ptr->licenses, job_ptr->network);
 	xstrcat(out, tmp_line);
 
-	/****** Lines 16, 17 (optional, batch only) ******/
+	/****** Line 16 (optional, batch only) ******/
 	if (job_ptr->batch_flag) {
 		if (one_liner)
 			xstrcat(out, " ");
@@ -580,14 +583,15 @@ line13:
 			xstrcat(out, "\n   ");
 		sprintf(tmp_line, "Command=%s", job_ptr->command);
 		xstrcat(out, tmp_line);
-
-		if (one_liner)
-			xstrcat(out, " ");
-		else
-			xstrcat(out, "\n   ");
-		sprintf(tmp_line, "WorkDir=%s", job_ptr->work_dir);
-		xstrcat(out, tmp_line);
 	}
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+
+	/****** Line 17 ******/
+	sprintf(tmp_line, "WorkDir=%s", job_ptr->work_dir);
+	xstrcat(out, tmp_line);
 
 #ifdef HAVE_BG
 	/****** Line 18 (optional) ******/
diff --git a/src/api/pmi.c b/src/api/pmi.c
index 90e8b1eb1ec889c541ee271fc7bc865a082f95ba..af408c4283985fda1edc52ef6a7b938ec240b330 100644
--- a/src/api/pmi.c
+++ b/src/api/pmi.c
@@ -1614,13 +1614,16 @@ int PMI_Parse_option(int num_args, char *args[], int *num_parsed,
 		}
 		len = cp - kp;
 		temp[s].key = (char *) malloc((len+1) * sizeof (char));
-		if (temp[s].key == NULL)
+		if (temp[s].key == NULL) {
+			temp[s].val = NULL;
+			PMI_Free_keyvals(temp, s);
 			return PMI_FAIL;
+		}
 		strncpy(temp[s].key, kp, len);
 		temp[s].key[len] = '\0';
 		if (!IsPmiKey(temp[s].key)) {
 			free(temp[s].key);
-			temp[s].key=NULL;
+			temp[s].key = NULL;
 			break;
 		}
 		vp = ++cp;
@@ -1628,8 +1631,10 @@ int PMI_Parse_option(int num_args, char *args[], int *num_parsed,
 			cp++;
 		len = cp - vp + 1;
 		temp[s].val = (char *) malloc((len+1) * sizeof (char));
-		if (temp[s].val == NULL)
+		if (temp[s].val == NULL) {
+			PMI_Free_keyvals(temp, s+1);
 			return PMI_FAIL;
+		}
 		strncpy(temp[s].val, vp, len);
 		temp[s].val[len] = '\0';
 		s++;
@@ -1692,25 +1697,30 @@ int PMI_Args_to_keyval(int *argcp, char *((*argvp)[]), PMI_keyval_t **keyvalp,
 	if  (pmi_debug)
 		fprintf(stderr, "In: PMI_Args_to_keyval \n");
 
-	if ((keyvalp == NULL) || (size == NULL) || (argcp == NULL) || (argvp == NULL))
+	if ((keyvalp == NULL) || (size == NULL) || 
+	    (argcp == NULL) || (argvp == NULL))
 		return PMI_ERR_INVALID_ARG;
 
-	cnt=*argcp;
+	cnt  = *argcp;
 	argv = *argvp;
 
+	if (cnt == 0)
+		return PMI_ERR_INVALID_ARG;
+
 	temp = (PMI_keyval_t *) malloc(cnt * (sizeof (PMI_keyval_t)));
 	if (temp == NULL)
 		return PMI_FAIL;
 
-	if (cnt == 0)
-		return PMI_ERR_INVALID_ARG;
 	j = 0;
 	i = 0;
 
 	if (argv[i][0] != '-') {
 		temp[j].val = (char *) malloc((strlen(argv[i])+1) * sizeof (char));
-		if (temp[j].val == NULL)
+		if (temp[j].val == NULL) {
+			temp[j].key = NULL;
+			PMI_Free_keyvals(temp, j);
 			return PMI_FAIL;
+		}
 		strcpy(temp[j].val, argv[i]);
 		temp[i].key=NULL;
 		--cnt;
@@ -1722,8 +1732,11 @@ int PMI_Args_to_keyval(int *argcp, char *((*argvp)[]), PMI_keyval_t **keyvalp,
 		if (argv[i][0] == '-') {
 			temp[j].key = (char *) malloc((strlen(argv[i])+1) *
 					sizeof (char));
-			if (temp[j].key == NULL)
+			if (temp[j].key == NULL) {
+				temp[j].val = NULL;
+				PMI_Free_keyvals(temp, j);
 				return PMI_FAIL;
+			}
 			strcpy(temp[j].key, argv[i]);
 			++i;
 			--cnt;
@@ -1731,8 +1744,10 @@ int PMI_Args_to_keyval(int *argcp, char *((*argvp)[]), PMI_keyval_t **keyvalp,
 				temp[j].val = (char *) malloc(
 						(strlen(argv[i])+1) *
 						sizeof (char));
-				if (temp[j].val == NULL)
+				if (temp[j].val == NULL) {
+					PMI_Free_keyvals(temp, j+1);
 					return PMI_FAIL;
+				}
 				strcpy(temp[j].val, argv[i]);
 				i++;
 				--cnt;
@@ -1741,6 +1756,7 @@ int PMI_Args_to_keyval(int *argcp, char *((*argvp)[]), PMI_keyval_t **keyvalp,
 			}
 			j++;
 		} else {
+			PMI_Free_keyvals(temp, j);
 			return PMI_ERR_INVALID_ARG;
 		}
 	}
diff --git a/src/api/reconfigure.c b/src/api/reconfigure.c
index 0f3da3b2be2de6cd9ee65aca923d45f12acb711b..6ac81646578aabd648e3ac3197b1901dc538d373 100644
--- a/src/api/reconfigure.c
+++ b/src/api/reconfigure.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  reconfigure.c - request that slurmctld shutdown or re-read the
  *	            configuration files
- *  $Id: reconfigure.c 19095 2009-12-01 22:59:18Z da $
+ *  $Id: reconfigure.c 19271 2010-01-19 21:00:56Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
@@ -163,12 +163,15 @@ _send_message_controller (enum controller_id dest, slurm_msg_t *req)
 	if ((fd = slurm_open_controller_conn_spec(dest)) < 0)
 		slurm_seterrno_ret(SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR);
 
-	if (slurm_send_node_msg(fd, req) < 0)
+	if (slurm_send_node_msg(fd, req) < 0) {
+		slurm_shutdown_msg_conn(fd);
 		slurm_seterrno_ret(SLURMCTLD_COMMUNICATIONS_SEND_ERROR);
+	}
 	resp_msg = xmalloc(sizeof(slurm_msg_t));
 	slurm_msg_t_init(resp_msg);
 
 	if((rc = slurm_receive_msg(fd, resp_msg, 0)) != 0) {
+		slurm_shutdown_msg_conn(fd);
 		return SLURMCTLD_COMMUNICATIONS_RECEIVE_ERROR;
 	}
 
@@ -181,8 +184,8 @@ _send_message_controller (enum controller_id dest, slurm_msg_t *req)
 					   resp_msg->data);
 	slurm_free_msg(resp_msg);
 
-	if (rc) slurm_seterrno_ret(rc);
-
+	if (rc)
+		slurm_seterrno_ret(rc);
         return rc;
 }
 
diff --git a/src/api/step_ctx.c b/src/api/step_ctx.c
index 19afd405622a251c3a9775284ea0acab769c6bc8..61888adffa6f802ae03122f360d71d80c8238a34 100644
--- a/src/api/step_ctx.c
+++ b/src/api/step_ctx.c
@@ -156,6 +156,7 @@ slurm_step_ctx_create (const slurm_step_ctx_params_t *step_params)
 	    (step_resp == NULL)) {
 		errnum = errno;
 		slurm_free_job_step_create_request_msg(step_req);
+		close(sock);
 		goto fail;
 	}
 
@@ -295,8 +296,7 @@ slurm_step_ctx_get (slurm_step_ctx_t *ctx, int ctx_key, ...)
 
 	case SLURM_STEP_CTX_TID:
 		node_inx = va_arg(ap, uint32_t);
-		if ((node_inx < 0)
-		    || (node_inx > ctx->step_resp->step_layout->node_cnt)) {
+		if (node_inx > ctx->step_resp->step_layout->node_cnt) {
 			slurm_seterrno(EINVAL);
 			rc = SLURM_ERROR;
 			break;
@@ -325,8 +325,7 @@ slurm_step_ctx_get (slurm_step_ctx_t *ctx, int ctx_key, ...)
 		break;
 	case SLURM_STEP_CTX_HOST:
 		node_inx = va_arg(ap, uint32_t);
-		if ((node_inx < 0)
-		    || (node_inx > ctx->step_resp->step_layout->node_cnt)) {
+		if (node_inx > ctx->step_resp->step_layout->node_cnt) {
 			slurm_seterrno(EINVAL);
 			rc = SLURM_ERROR;
 			break;
diff --git a/src/api/step_launch.c b/src/api/step_launch.c
index 2ae2402da62cfc8b1094f437d494a108726b0b54..46161e5c6b99a6ea96eb4688602fc90fc242093f 100644
--- a/src/api/step_launch.c
+++ b/src/api/step_launch.c
@@ -442,6 +442,8 @@ void slurm_step_launch_wait_finish(slurm_step_ctx_t *ctx)
 			}
 		}
 	}
+	if (sls->abort && !time_set)
+		info("Job step aborted");	/* no need to wait */
 
 	if (!force_terminated_job && task_exit_signal)
 		info("Force Terminated job step %u.%u",
@@ -931,13 +933,13 @@ _node_fail_handler(struct step_launch_state *sls, slurm_msg_t *fail_msg)
 	for (i = 0; i < num_node_ids; i++) {
 		node = hostlist_next(fail_itr);
 		node_id = node_ids[i] = hostset_find(all_nodes, node);
-		free(node);
-
 		if (node_id < 0) {
 			error(  "Internal error: bad SRUN_NODE_FAIL message. "
 				"Node %s not part of this job step", node);
+			free(node);
 			continue;
 		}
+		free(node);
 
 		/* find all of the tasks that should run on this node and
 		 * mark them as having started and exited.  If they haven't
@@ -1028,13 +1030,13 @@ _step_missing_handler(struct step_launch_state *sls, slurm_msg_t *missing_msg)
 	for (i = 0; i < num_node_ids; i++) {
 		node = hostlist_next(fail_itr);
 		node_id = hostset_find(all_nodes, node);
-		free(node);
-
 		if (node_id < 0) {
 			error(  "Internal error: bad SRUN_STEP_MISSING message. "
 				"Node %s not part of this job step", node);
+			free(node);
 			continue;
 		}
+		free(node);
 
 		/* If this is true, an I/O error has already occurred on the
 		   stepd for the current node, and the job should abort */
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index f10760909c1ec660711de13d985c7f737d04c2f8..9fd367779c2244fa73562e3e7049d414d9d4c67d 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -2778,7 +2778,7 @@ extern int load_qos_usage(char *state_save_location)
 	debug3("Version in assoc_mgr_state header is %u", ver);
 	if (ver != ASSOC_USAGE_VERSION) {
 		error("***********************************************");
-		error("Can not recover usage_mgr state, incompatable version, "
+		error("Can not recover usage_mgr state, incompatible version, "
 		      "got %u need %u", ver, ASSOC_USAGE_VERSION);
 		error("***********************************************");
 		free_buf(buffer);
diff --git a/src/common/jobacct_common.c b/src/common/jobacct_common.c
index 7795bfced517d6a5431c901d60f6735cd2838ca5..218dc5a19f3c6977c666316ee753335e512e4cdc 100644
--- a/src/common/jobacct_common.c
+++ b/src/common/jobacct_common.c
@@ -44,14 +44,15 @@
 pthread_mutex_t jobacct_lock = PTHREAD_MUTEX_INITIALIZER;
 uint32_t jobacct_job_id = 0;
 uint32_t jobacct_mem_limit = 0;
+uint32_t mult = 1000;
 
-static void _pack_jobacct_id(jobacct_id_t *jobacct_id, Buf buffer)
+static void _pack_jobacct_id(jobacct_id_t *jobacct_id, uint16_t rpc_version, Buf buffer)
 {
 	pack32((uint32_t)jobacct_id->nodeid, buffer);
 	pack16((uint16_t)jobacct_id->taskid, buffer);
 }
 
-static int _unpack_jobacct_id(jobacct_id_t *jobacct_id, Buf buffer)
+static int _unpack_jobacct_id(jobacct_id_t *jobacct_id, uint16_t rpc_version, Buf buffer)
 {
 	safe_unpack32(&jobacct_id->nodeid, buffer);
 	safe_unpack16(&jobacct_id->taskid, buffer);
@@ -60,62 +61,124 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-static void _pack_sacct(sacct_t *sacct, Buf buffer)
+static void _pack_sacct(sacct_t *sacct, uint16_t rpc_version, Buf buffer)
 {
 	int i=0;
 
-	if(!sacct) {
-		for(i=0; i<4; i++)
-			pack32((uint32_t) 0, buffer);
+	if(rpc_version >= 6) {
+		if(!sacct) {
+			for(i=0; i<4; i++)
+				pack32((uint32_t) 0, buffer);
 
-		for(i=0; i<4; i++)
-			packdouble(0, buffer);
+			for(i=0; i<4; i++)
+				packdouble(0, buffer);
 
-		for(i=0; i<4; i++) {	/* _pack_jobacct_id() */
-			pack32((uint32_t) 0, buffer);
-			pack16((uint16_t) 0, buffer);
+			for(i=0; i<4; i++) {	/* _pack_jobacct_id() */
+				pack32((uint32_t) 0, buffer);
+				pack16((uint16_t) 0, buffer);
+			}
+			return;
 		}
-		return;
-	}
 
-	pack32(sacct->max_vsize, buffer);
-	pack32(sacct->max_rss, buffer);
-	pack32(sacct->max_pages, buffer);
-	pack32(sacct->min_cpu, buffer);
+		pack32(sacct->max_vsize, buffer);
+		pack32(sacct->max_rss, buffer);
+		pack32(sacct->max_pages, buffer);
+		pack32(sacct->min_cpu, buffer);
 
-	packdouble(sacct->ave_vsize, buffer);
-	packdouble(sacct->ave_rss, buffer);
-	packdouble(sacct->ave_pages, buffer);
-	packdouble(sacct->ave_cpu, buffer);
+		packdouble(sacct->ave_vsize, buffer);
+		packdouble(sacct->ave_rss, buffer);
+		packdouble(sacct->ave_pages, buffer);
+		packdouble(sacct->ave_cpu, buffer);
 
-	_pack_jobacct_id(&sacct->max_vsize_id, buffer);
-	_pack_jobacct_id(&sacct->max_rss_id, buffer);
-	_pack_jobacct_id(&sacct->max_pages_id, buffer);
-	_pack_jobacct_id(&sacct->min_cpu_id, buffer);
-}
+		_pack_jobacct_id(&sacct->max_vsize_id, rpc_version, buffer);
+		_pack_jobacct_id(&sacct->max_rss_id, rpc_version, buffer);
+		_pack_jobacct_id(&sacct->max_pages_id, rpc_version, buffer);
+		_pack_jobacct_id(&sacct->min_cpu_id, rpc_version, buffer);
+	} else {
+		uint32_t temp;
 
-/* you need to xfree this */
-static int _unpack_sacct(sacct_t *sacct, Buf buffer)
-{
-	safe_unpack32(&sacct->max_vsize, buffer);
-	safe_unpack32(&sacct->max_rss, buffer);
-	safe_unpack32(&sacct->max_pages, buffer);
-	safe_unpack32(&sacct->min_cpu, buffer);
+		if(!sacct) {
+			for(i=0; i<8; i++)
+				pack32((uint32_t) 0, buffer);
 
-	safe_unpackdouble(&sacct->ave_vsize, buffer);
-	safe_unpackdouble(&sacct->ave_rss, buffer);
-	safe_unpackdouble(&sacct->ave_pages, buffer);
-	safe_unpackdouble(&sacct->ave_cpu, buffer);
+			for(i=0; i<4; i++) {	/* _pack_jobacct_id() */
+				pack32((uint32_t) 0, buffer);
+				pack16((uint16_t) 0, buffer);
+			}
+			return;
+		}
 
-	if(_unpack_jobacct_id(&sacct->max_vsize_id, buffer) != SLURM_SUCCESS)
-		goto unpack_error;
-	if(_unpack_jobacct_id(&sacct->max_rss_id, buffer) != SLURM_SUCCESS)
-		goto unpack_error;
-	if(_unpack_jobacct_id(&sacct->max_pages_id, buffer) != SLURM_SUCCESS)
-		goto unpack_error;
-	if(_unpack_jobacct_id(&sacct->min_cpu_id, buffer) != SLURM_SUCCESS)
-		goto unpack_error;
+		pack32(sacct->max_vsize, buffer);
+		temp = sacct->ave_vsize * mult;
+		pack32(temp, buffer);
+		pack32(sacct->max_rss, buffer);
+		temp = (uint32_t)sacct->ave_rss * mult;
+		pack32(temp, buffer);
+		pack32(sacct->max_pages, buffer);
+		temp = (uint32_t)sacct->ave_pages * mult;
+		pack32(temp, buffer);
+		temp = (uint32_t)sacct->min_cpu * mult;
+		pack32(temp, buffer);
+		temp = (uint32_t)sacct->ave_cpu * mult;
+		pack32(temp, buffer);
+
+		_pack_jobacct_id(&sacct->max_vsize_id, rpc_version, buffer);
+		_pack_jobacct_id(&sacct->max_rss_id, rpc_version, buffer);
+		_pack_jobacct_id(&sacct->max_pages_id, rpc_version, buffer);
+		_pack_jobacct_id(&sacct->min_cpu_id, rpc_version, buffer);
+	}
+}
 
+/* you need to xfree this */
+static int _unpack_sacct(sacct_t *sacct, uint16_t rpc_version, Buf buffer)
+{
+	if(rpc_version >= 6) {
+		safe_unpack32(&sacct->max_vsize, buffer);
+		safe_unpack32(&sacct->max_rss, buffer);
+		safe_unpack32(&sacct->max_pages, buffer);
+		safe_unpack32(&sacct->min_cpu, buffer);
+
+		safe_unpackdouble(&sacct->ave_vsize, buffer);
+		safe_unpackdouble(&sacct->ave_rss, buffer);
+		safe_unpackdouble(&sacct->ave_pages, buffer);
+		safe_unpackdouble(&sacct->ave_cpu, buffer);
+
+		if(_unpack_jobacct_id(&sacct->max_vsize_id, rpc_version, buffer) != SLURM_SUCCESS)
+			goto unpack_error;
+		if(_unpack_jobacct_id(&sacct->max_rss_id, rpc_version, buffer) != SLURM_SUCCESS)
+			goto unpack_error;
+		if(_unpack_jobacct_id(&sacct->max_pages_id, rpc_version, buffer) != SLURM_SUCCESS)
+			goto unpack_error;
+		if(_unpack_jobacct_id(&sacct->min_cpu_id, rpc_version, buffer) != SLURM_SUCCESS)
+			goto unpack_error;
+	} else {
+		/* this is here to handle the floats since it appears sending
+		 * in a float with a typecast returns incorrect information
+		 */
+		uint32_t temp;
+
+		safe_unpack32(&sacct->max_vsize, buffer);
+		safe_unpack32(&temp, buffer);
+		sacct->ave_vsize = temp / mult;
+		safe_unpack32(&sacct->max_rss, buffer);
+		safe_unpack32(&temp, buffer);
+		sacct->ave_rss = temp / mult;
+		safe_unpack32(&sacct->max_pages, buffer);
+		safe_unpack32(&temp, buffer);
+		sacct->ave_pages = temp / mult;
+		safe_unpack32(&temp, buffer);
+		sacct->min_cpu = temp / mult;
+		safe_unpack32(&temp, buffer);
+		sacct->ave_cpu = temp / mult;
+		if(_unpack_jobacct_id(&sacct->max_vsize_id, rpc_version, buffer) != SLURM_SUCCESS)
+			goto unpack_error;
+		if(_unpack_jobacct_id(&sacct->max_rss_id, rpc_version, buffer) != SLURM_SUCCESS)
+			goto unpack_error;
+		if(_unpack_jobacct_id(&sacct->max_pages_id, rpc_version, buffer) != SLURM_SUCCESS)
+			goto unpack_error;
+		if(_unpack_jobacct_id(&sacct->min_cpu_id, rpc_version, buffer) != SLURM_SUCCESS)
+			goto unpack_error;
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -220,7 +283,7 @@ extern void pack_jobacct_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 		pack32(job->resvid, buffer);
 		pack32(job->req_cpus, buffer);
 		pack32(job->requid, buffer);
-		_pack_sacct(&job->sacct, buffer);
+		_pack_sacct(&job->sacct, rpc_version, buffer);
 		pack32(job->show_full, buffer);
 		pack_time(job->start, buffer);
 		pack16((uint16_t)job->state, buffer);
@@ -269,7 +332,7 @@ extern void pack_jobacct_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 		pack16(job->qos, buffer);
 		pack32(job->req_cpus, buffer);
 		pack32(job->requid, buffer);
-		_pack_sacct(&job->sacct, buffer);
+		_pack_sacct(&job->sacct, rpc_version, buffer);
 		pack32(job->show_full, buffer);
 		pack_time(job->start, buffer);
 		pack16((uint16_t)job->state, buffer);
@@ -317,7 +380,7 @@ extern void pack_jobacct_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 		pack16(job->qos, buffer);
 		pack32(job->req_cpus, buffer);
 		pack32(job->requid, buffer);
-		_pack_sacct(&job->sacct, buffer);
+		_pack_sacct(&job->sacct, rpc_version, buffer);
 		pack32(job->show_full, buffer);
 		pack_time(job->start, buffer);
 		pack16((uint16_t)job->state, buffer);
@@ -381,7 +444,7 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 		safe_unpack32(&job_ptr->resvid, buffer);
 		safe_unpack32(&job_ptr->req_cpus, buffer);
 		safe_unpack32(&job_ptr->requid, buffer);
-		_pack_sacct(&job_ptr->sacct, buffer);
+		_pack_sacct(&job_ptr->sacct, rpc_version, buffer);
 		safe_unpack32(&job_ptr->show_full, buffer);
 		safe_unpack_time(&job_ptr->start, buffer);
 		safe_unpack16(&uint16_tmp, buffer);
@@ -435,7 +498,7 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 		safe_unpack16(&job_ptr->qos, buffer);
 		safe_unpack32(&job_ptr->req_cpus, buffer);
 		safe_unpack32(&job_ptr->requid, buffer);
-		_pack_sacct(&job_ptr->sacct, buffer);
+		_pack_sacct(&job_ptr->sacct, rpc_version, buffer);
 		safe_unpack32(&job_ptr->show_full, buffer);
 		safe_unpack_time(&job_ptr->start, buffer);
 		safe_unpack16(&uint16_tmp, buffer);
@@ -489,7 +552,7 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 		safe_unpack16(&job_ptr->qos, buffer);
 		safe_unpack32(&job_ptr->req_cpus, buffer);
 		safe_unpack32(&job_ptr->requid, buffer);
-		_pack_sacct(&job_ptr->sacct, buffer);
+		_pack_sacct(&job_ptr->sacct, rpc_version, buffer);
 		safe_unpack32(&job_ptr->show_full, buffer);
 		safe_unpack_time(&job_ptr->start, buffer);
 		safe_unpack16(&uint16_tmp, buffer);
@@ -541,7 +604,7 @@ extern void pack_jobacct_step_rec(jobacct_step_rec_t *step,
 		packstr(step->nodes, buffer);
 		pack32(step->ntasks, buffer);
 		pack32(step->requid, buffer);
-		_pack_sacct(&step->sacct, buffer);
+		_pack_sacct(&step->sacct, rpc_version, buffer);
 		pack_time(step->start, buffer);
 		pack16(step->state, buffer);
 		pack32(step->stepid, buffer);	/* job's step number */
@@ -562,7 +625,7 @@ extern void pack_jobacct_step_rec(jobacct_step_rec_t *step,
 		pack32(step->ncpus, buffer);
 		packstr(step->nodes, buffer);
 		pack32(step->requid, buffer);
-		_pack_sacct(&step->sacct, buffer);
+		_pack_sacct(&step->sacct, rpc_version, buffer);
 		pack_time(step->start, buffer);
 		pack16(step->state, buffer);
 		pack32(step->stepid, buffer);	/* job's step number */
@@ -596,7 +659,7 @@ extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step,
 		safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
 		safe_unpack32(&step_ptr->ntasks, buffer);
 		safe_unpack32(&step_ptr->requid, buffer);
-		_unpack_sacct(&step_ptr->sacct, buffer);
+		_unpack_sacct(&step_ptr->sacct, rpc_version, buffer);
 		safe_unpack_time(&step_ptr->start, buffer);
 		safe_unpack16(&uint16_tmp, buffer);
 		step_ptr->state = uint16_tmp;
@@ -620,7 +683,7 @@ extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step,
 		safe_unpack32(&step_ptr->ncpus, buffer);
 		safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
 		safe_unpack32(&step_ptr->requid, buffer);
-		_unpack_sacct(&step_ptr->sacct, buffer);
+		_unpack_sacct(&step_ptr->sacct, rpc_version, buffer);
 		safe_unpack_time(&step_ptr->start, buffer);
 		safe_unpack16(&uint16_tmp, buffer);
 		step_ptr->state = uint16_tmp;
@@ -859,7 +922,7 @@ rwfail:
 }
 
 extern void jobacct_common_aggregate(struct jobacctinfo *dest,
-			     struct jobacctinfo *from)
+				     struct jobacctinfo *from)
 {
 	xassert(dest);
 	xassert(from);
@@ -939,7 +1002,7 @@ extern void jobacct_common_2_sacct(sacct_t *sacct, struct jobacctinfo *jobacct)
 	slurm_mutex_unlock(&jobacct_lock);
 }
 
-extern void jobacct_common_pack(struct jobacctinfo *jobacct, Buf buffer)
+extern void jobacct_common_pack(struct jobacctinfo *jobacct, uint16_t rpc_version, Buf buffer)
 {
 	int i=0;
 
@@ -963,15 +1026,15 @@ extern void jobacct_common_pack(struct jobacctinfo *jobacct, Buf buffer)
 	pack32((uint32_t)jobacct->tot_pages, buffer);
 	pack32((uint32_t)jobacct->min_cpu, buffer);
 	pack32((uint32_t)jobacct->tot_cpu, buffer);
-	_pack_jobacct_id(&jobacct->max_vsize_id, buffer);
-	_pack_jobacct_id(&jobacct->max_rss_id, buffer);
-	_pack_jobacct_id(&jobacct->max_pages_id, buffer);
-	_pack_jobacct_id(&jobacct->min_cpu_id, buffer);
+	_pack_jobacct_id(&jobacct->max_vsize_id, rpc_version, buffer);
+	_pack_jobacct_id(&jobacct->max_rss_id, rpc_version, buffer);
+	_pack_jobacct_id(&jobacct->max_pages_id, rpc_version, buffer);
+	_pack_jobacct_id(&jobacct->min_cpu_id, rpc_version, buffer);
 	slurm_mutex_unlock(&jobacct_lock);
 }
 
 /* you need to xfree this */
-extern int jobacct_common_unpack(struct jobacctinfo **jobacct, Buf buffer)
+extern int jobacct_common_unpack(struct jobacctinfo **jobacct, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	*jobacct = xmalloc(sizeof(struct jobacctinfo));
@@ -991,16 +1054,16 @@ extern int jobacct_common_unpack(struct jobacctinfo **jobacct, Buf buffer)
 	safe_unpack32(&(*jobacct)->tot_pages, buffer);
 	safe_unpack32(&(*jobacct)->min_cpu, buffer);
 	safe_unpack32(&(*jobacct)->tot_cpu, buffer);
-	if(_unpack_jobacct_id(&(*jobacct)->max_vsize_id, buffer)
+	if(_unpack_jobacct_id(&(*jobacct)->max_vsize_id, rpc_version, buffer)
 	   != SLURM_SUCCESS)
 		goto unpack_error;
-	if(_unpack_jobacct_id(&(*jobacct)->max_rss_id, buffer)
+	if(_unpack_jobacct_id(&(*jobacct)->max_rss_id, rpc_version, buffer)
 	   != SLURM_SUCCESS)
 		goto unpack_error;
-	if(_unpack_jobacct_id(&(*jobacct)->max_pages_id, buffer)
+	if(_unpack_jobacct_id(&(*jobacct)->max_pages_id, rpc_version, buffer)
 	   != SLURM_SUCCESS)
 		goto unpack_error;
-	if(_unpack_jobacct_id(&(*jobacct)->min_cpu_id, buffer)
+	if(_unpack_jobacct_id(&(*jobacct)->min_cpu_id, rpc_version, buffer)
 	   != SLURM_SUCCESS)
 		goto unpack_error;
 
diff --git a/src/common/jobacct_common.h b/src/common/jobacct_common.h
index 9c601511ff7e86baba6a547deabece9dde9b955b..4f797fca4ee5c514e2c01d132f24ac87f985b61f 100644
--- a/src/common/jobacct_common.h
+++ b/src/common/jobacct_common.h
@@ -232,8 +232,10 @@ extern void jobacct_common_aggregate(struct jobacctinfo *dest,
 			     struct jobacctinfo *from);
 extern void jobacct_common_2_sacct(sacct_t *sacct,
 				   struct jobacctinfo *jobacct);
-extern void jobacct_common_pack(struct jobacctinfo *jobacct, Buf buffer);
-extern int jobacct_common_unpack(struct jobacctinfo **jobacct, Buf buffer);
+extern void jobacct_common_pack(struct jobacctinfo *jobacct,
+				uint16_t rpc_version, Buf buffer);
+extern int jobacct_common_unpack(struct jobacctinfo **jobacct,
+				 uint16_t rpc_version, Buf buffer);
 
 extern int jobacct_common_set_mem_limit(uint32_t job_id, uint32_t mem_limit);
 extern int jobacct_common_add_task(pid_t pid, jobacct_id_t *jobacct_id,
diff --git a/src/common/net.c b/src/common/net.c
index e2929046107629f72d77037d0912cf112d3383dd..3f4ad8772383c333598946abfae711061cfab5fa 100644
--- a/src/common/net.c
+++ b/src/common/net.c
@@ -104,7 +104,7 @@ int net_stream_listen(int *fd, short *port)
 
 	val = 1;
 	rc = setsockopt(*fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(int));
-	if (rc > 0)
+	if (rc < 0)
 		goto cleanup;
 
 	*port = _sock_bind_wild(*fd);
diff --git a/src/common/plugrack.c b/src/common/plugrack.c
index b7543ce04b9f32c1da704f26398feaee19d38ca0..d2b63cab80bf5d075a822519fd3f917ac2610ed1 100644
--- a/src/common/plugrack.c
+++ b/src/common/plugrack.c
@@ -1,7 +1,8 @@
 /*****************************************************************************\
  *  plugrack.c - an intelligent container for plugins
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -104,10 +105,10 @@ char *strchr(), *strrchr();
  * If this is zero, the rack code may decide to unload the plugin.
  */
 typedef struct _plugrack_entry {
-        const char *full_type;
-        const char *fq_path;
-        plugin_handle_t        plug;
-        int refcount;
+	const char *full_type;
+	const char *fq_path;
+	plugin_handle_t	plug;
+	int refcount;
 } plugrack_entry_t;
 
 /*
@@ -124,13 +125,13 @@ typedef struct _plugrack_entry {
  * loading it.
  */
 struct _plugrack {
-        List entries;
-        const char *major_type;
-        uid_t uid;
-        uint8_t     paranoia;
+	List entries;
+	const char *major_type;
+	uid_t uid;
+	uint8_t     paranoia;
 };
 
-#define PLUGRACK_UID_NOBODY                99        /* RedHat's, anyway. */
+#define PLUGRACK_UID_NOBODY		99	/* RedHat's, anyway. */
 
 static bool _match_major ( const char *path_name, const char *major_type );
 static int _plugrack_read_single_dir( plugrack_t rack, char *dir );
@@ -143,21 +144,22 @@ static bool _so_file( char *pathname );
 static void
 plugrack_entry_destructor( void *v )
 {
-        plugrack_entry_t *victim = v;
-
-        if ( victim == NULL ) return;
-
-        /*
-         * Free memory and unload the plugin if necessary.  The assert
-         * is to make sure we were actually called from the List destructor
-         * which should only be callable from plugrack_destroy().
-         */
-        xassert( victim->refcount == 0 );
-        xfree( victim->full_type );
-        xfree( victim->fq_path );
-        if ( victim->plug != PLUGIN_INVALID_HANDLE )
+	plugrack_entry_t *victim = v;
+
+	if ( victim == NULL )
+		return;
+
+	/*
+	 * Free memory and unload the plugin if necessary.  The assert
+	 * is to make sure we were actually called from the List destructor
+	 * which should only be callable from plugrack_destroy().
+	 */
+	xassert( victim->refcount == 0 );
+	xfree( victim->full_type );
+	xfree( victim->fq_path );
+	if ( victim->plug != PLUGIN_INVALID_HANDLE )
 		plugin_unload( victim->plug );
-        xfree( victim );
+	xfree( victim );
 }
 
 /*
@@ -172,88 +174,88 @@ plugrack_entry_destructor( void *v )
  */
 static int
 accept_path_paranoia( plugrack_t rack,
-                      const char *fq_path,
-                      int check_own,
-                      int check_write )
+		      const char *fq_path,
+		      int check_own,
+		      int check_write )
 {
-        struct stat st;
+	struct stat st;
 
-        /* Internal function, so assert rather than fail gracefully. */
-        xassert( rack );
-        xassert( fq_path );
+	/* Internal function, so assert rather than fail gracefully. */
+	xassert( rack );
+	xassert( fq_path );
 
-        if ( stat( fq_path, &st ) < 0 ) {
+	if ( stat( fq_path, &st ) < 0 ) {
 		debug3( "accept_path_paranoia: stat(%s) failed", fq_path );
-                return 0;
-        }
+		return 0;
+	}
 
-        /* Is path owned by authorized user? */
-        if ( check_own ) {
-                if ( st.st_uid != rack->uid ) {
+	/* Is path owned by authorized user? */
+	if ( check_own ) {
+		if ( st.st_uid != rack->uid ) {
 			debug3( "accept_path_paranoia: %s not owned by "
 				"proper user", fq_path );
 			return 0;
 		}
-        }
+	}
 
-        /* Is path writable by others? */
-        if ( check_write ) {
-                if (  ( st.st_mode & S_IWGRP )
-		      || ( st.st_mode & S_IWOTH ) ) {
+	/* Is path writable by others? */
+	if ( check_write ) {
+		if (  ( st.st_mode & S_IWGRP ) || ( st.st_mode & S_IWOTH ) ) {
 			debug3( "accept_path_paranoia: %s writable by others",
 				fq_path );
 			return 0;
 		}
-        }
+	}
 
-        return 1;
+	return 1;
 }
 
 
 plugrack_t plugrack_create( void )
 {
-        plugrack_t rack = (plugrack_t) xmalloc( sizeof( struct _plugrack ) );
-
-        rack->paranoia     = PLUGRACK_PARANOIA_NONE;
-        rack->major_type   = NULL;
-        rack->uid          = PLUGRACK_UID_NOBODY;
-        rack->entries      = list_create( plugrack_entry_destructor );
-        if ( rack->entries == NULL ) {
-                xfree( rack );
-                return NULL;
-        }
-        return rack;
+	plugrack_t rack = (plugrack_t) xmalloc( sizeof( struct _plugrack ) );
+
+	rack->paranoia     = PLUGRACK_PARANOIA_NONE;
+	rack->major_type   = NULL;
+	rack->uid          = PLUGRACK_UID_NOBODY;
+	rack->entries      = list_create( plugrack_entry_destructor );
+	if ( rack->entries == NULL ) {
+		xfree( rack );
+		return NULL;
+	}
+	return rack;
 }
 
 
 int
 plugrack_destroy( plugrack_t rack )
 {
-        ListIterator it;
-        plugrack_entry_t *e;
-
-        if ( ! rack ) return SLURM_ERROR;
-
-        /*
-         * See if there are any plugins still being used.  If we unload them,
-         * the program might crash because cached virtual mapped addresses
-         * will suddenly be outside our virtual address space.
-         */
-        it = list_iterator_create( rack->entries );
-        while ( ( e = list_next( it ) ) != NULL ) {
-                if ( e->refcount > 0 ) {
+	ListIterator it;
+	plugrack_entry_t *e;
+
+	if ( ! rack )
+		return SLURM_ERROR;
+
+	/*
+	 * See if there are any plugins still being used.  If we unload them,
+	 * the program might crash because cached virtual mapped addresses
+	 * will suddenly be outside our virtual address space.
+	 */
+	it = list_iterator_create( rack->entries );
+	while ( ( e = list_next( it ) ) != NULL ) {
+		if ( e->refcount > 0 ) {
 			debug2( "plugrack_destroy: attempt to destroy "
 				"plugin rack that is still in use" );
-                        list_iterator_destroy( it );
-                        return SLURM_ERROR; /* plugins still in use. */
-                }
-        }
-        list_iterator_destroy( it );
+			list_iterator_destroy( it );
+			return SLURM_ERROR; /* plugins still in use. */
+		}
+	}
+	list_iterator_destroy( it );
 
-        list_destroy( rack->entries );
+	list_destroy( rack->entries );
 	xfree( rack->major_type );
-        xfree( rack );
-        return SLURM_SUCCESS;
+	xfree( rack );
+	return SLURM_SUCCESS;
 }
 
 
@@ -268,34 +270,34 @@ plugrack_set_major_type( plugrack_t rack, const char *type )
 	/* Free any pre-existing type. */
 	xfree( rack->major_type );
 
-        /* Install a new one. */
-        if ( type != NULL ) {
-                rack->major_type = xstrdup( type );
-                if ( rack->major_type == NULL ) {
-			debug3( "plugrack_set_major_type: unable to set type" );
+	/* Install a new one. */
+	if ( type != NULL ) {
+		rack->major_type = xstrdup( type );
+		if ( rack->major_type == NULL ) {
+			debug3( "plugrack_set_major_type: unable to set type");
 			return SLURM_ERROR;
 		}
-        }
+	}
 
-        return SLURM_SUCCESS;
+	return SLURM_SUCCESS;
 }
 
 
 int
 plugrack_set_paranoia( plugrack_t rack,
-                       const uint32_t flags,
-                       const uid_t uid )
+		       const uint32_t flags,
+		       const uid_t uid )
 
 {
 	if ( ! rack )
 		return SLURM_ERROR;
 
-        rack->paranoia = flags;
-        if ( flags ) {
-                rack->uid = uid;
-        }
+	rack->paranoia = flags;
+	if ( flags ) {
+		rack->uid = uid;
+	}
 
-        return SLURM_SUCCESS;
+	return SLURM_SUCCESS;
 }
 
 static int
@@ -303,20 +305,20 @@ plugrack_add_plugin_path( plugrack_t rack,
 			  const char *full_type,
 			  const char *fq_path )
 {
-        plugrack_entry_t *e;
+	plugrack_entry_t *e;
 
-        if ( ! rack ) return SLURM_ERROR;
-        if ( ! fq_path ) return SLURM_ERROR;
+	if ( ( ! rack ) || ( ! fq_path ) )
+		return SLURM_ERROR;
 
-        e = (plugrack_entry_t *) xmalloc( sizeof( plugrack_entry_t ) );
+	e = (plugrack_entry_t *) xmalloc( sizeof( plugrack_entry_t ) );
 
-        e->full_type = xstrdup( full_type );
-        e->fq_path   = xstrdup( fq_path );
-        e->plug      = PLUGIN_INVALID_HANDLE;
-        e->refcount  = 0;
+	e->full_type = xstrdup( full_type );
+	e->fq_path   = xstrdup( fq_path );
+	e->plug      = PLUGIN_INVALID_HANDLE;
+	e->refcount  = 0;
 	list_append( rack->entries, e );
 
-        return SLURM_SUCCESS;
+	return SLURM_SUCCESS;
 }
 
 
@@ -327,7 +329,7 @@ plugrack_read_dir( plugrack_t rack, const char *dir )
 	char *head, *dir_array;
 	int i, rc = SLURM_SUCCESS;
 
-	if ( ( ! rack ) || (! dir ) )
+	if ( ( ! rack ) || ( ! dir ) )
 		return SLURM_ERROR;
 
 	dir_array = xstrdup(dir);
@@ -354,70 +356,70 @@ plugrack_read_dir( plugrack_t rack, const char *dir )
 static int
 _plugrack_read_single_dir( plugrack_t rack, char *dir )
 {
-        char *fq_path;
-        char *tail;
-        DIR *dirp;
-        struct dirent *e;
-        struct stat st;
+	char *fq_path;
+	char *tail;
+	DIR *dirp;
+	struct dirent *e;
+	struct stat st;
 	static const size_t type_len = 64;
 	char plugin_type[ type_len ];
 	static int max_path_len = 0;
 
-        /* Allocate a buffer for fully-qualified path names. */
+	/* Allocate a buffer for fully-qualified path names. */
 	if (max_path_len == 0) {
 		max_path_len = pathconf("/", _PC_NAME_MAX);
 		if (max_path_len <= 0)
 			max_path_len = 256;
 	}
-        fq_path = xmalloc( strlen( dir ) + max_path_len + 1 );
-        xassert( fq_path );
-
-        /*
-         * Write the directory name in it, then a separator, then
-         * keep track of where we want to write the individual file
-         * names.
-         */
-        strcpy( fq_path, dir );
-        tail = &fq_path[ strlen( dir ) ];
-        *tail = '/';
-        ++tail;
-
-        /* Check whether we should be paranoid about this directory. */
-        if ( ! accept_path_paranoia( rack,
-                                     dir,
-                                     rack->paranoia &
+	fq_path = xmalloc( strlen( dir ) + max_path_len + 1 );
+	xassert( fq_path );
+
+	/*
+	 * Write the directory name in it, then a separator, then
+	 * keep track of where we want to write the individual file
+	 * names.
+	 */
+	strcpy( fq_path, dir );
+	tail = &fq_path[ strlen( dir ) ];
+	*tail = '/';
+	++tail;
+
+	/* Check whether we should be paranoid about this directory. */
+	if ( ! accept_path_paranoia( rack,
+				     dir,
+				     rack->paranoia &
 				     PLUGRACK_PARANOIA_DIR_OWN,
-                                     rack->paranoia &
+				     rack->paranoia &
 				     PLUGRACK_PARANOIA_DIR_WRITABLE ) ) {
 		xfree( fq_path );
-                return SLURM_ERROR;
-        }
+		return SLURM_ERROR;
+	}
 
-        /* Open the directory. */
-        dirp = opendir( dir );
-        if ( dirp == NULL ) {
+	/* Open the directory. */
+	dirp = opendir( dir );
+	if ( dirp == NULL ) {
 		error( "cannot open plugin directory %s", dir );
 		xfree( fq_path );
 		return SLURM_ERROR;
 	}
 
-        while ( 1 ) {
-                e = readdir( dirp );
-                if ( e == NULL )
+	while ( 1 ) {
+		e = readdir( dirp );
+		if ( e == NULL )
 			break;
 
-                /*
-                 * Compose file name.  Where NAME_MAX is defined it represents
+		/*
+		 * Compose file name.  Where NAME_MAX is defined it represents
 		 * the largest file name given in a dirent.  This macro is used
 		 * in the  allocation of "tail" above, so this unbounded copy
 		 * should work.
-                 */
-                strcpy( tail, e->d_name );
+		 */
+		strcpy( tail, e->d_name );
 
-                /* Check only regular files. */
-		if ( (strncmp(e->d_name, ".", 1) == 0)
-		     ||   (stat( fq_path, &st ) < 0)
-		     ||   (! S_ISREG(st.st_mode)) )
+		/* Check only regular files. */
+		if ( (strncmp(e->d_name, ".", 1) == 0) ||
+		     (stat( fq_path, &st ) < 0) ||
+		     (! S_ISREG(st.st_mode)) )
 			continue;
 
 		/* Check only shared object files */
@@ -432,19 +434,19 @@ _plugrack_read_single_dir( plugrack_t rack, char *dir )
 		    (!_match_major(e->d_name, rack->major_type)))
 			continue;
 
-                /* See if we should be paranoid about this file. */
-                if (!accept_path_paranoia( rack,
-                                           fq_path,
-                                           rack->paranoia &
-                                           PLUGRACK_PARANOIA_FILE_OWN,
-                                           rack->paranoia &
-                                           PLUGRACK_PARANOIA_FILE_WRITABLE )) {
+		/* See if we should be paranoid about this file. */
+		if (!accept_path_paranoia( rack,
+					   fq_path,
+					   rack->paranoia &
+					   PLUGRACK_PARANOIA_FILE_OWN,
+					   rack->paranoia &
+					   PLUGRACK_PARANOIA_FILE_WRITABLE )) {
 			debug3( "plugin_read_dir: skipping %s for security "
 				"reasons", fq_path );
-                        continue;
-                }
+			continue;
+		}
 
-                /* Test the type. */
+		/* Test the type. */
 		if ( plugin_peek( fq_path,
 				  plugin_type,
 				  type_len,
@@ -452,21 +454,21 @@ _plugrack_read_single_dir( plugrack_t rack, char *dir )
 			continue;
 		}
 
-		if (   rack->major_type
-		       && ( strncmp( rack->major_type,
-				     plugin_type,
-				     strlen( rack->major_type ) ) != 0 ) ) {
+		if (   rack->major_type &&
+		       ( strncmp( rack->major_type,
+				  plugin_type,
+				  strlen( rack->major_type ) ) != 0 ) ) {
 			continue;
 		}
 
-                /* Add it to the list. */
-                (void) plugrack_add_plugin_path( rack, plugin_type, fq_path );
-        }
+		/* Add it to the list. */
+		(void) plugrack_add_plugin_path( rack, plugin_type, fq_path );
+	}
 
 	closedir( dirp );
 
 	xfree( fq_path );
-        return SLURM_SUCCESS;
+	return SLURM_SUCCESS;
 }
 
 /* Return TRUE if the specified pathname is recognized as that of a shared
@@ -507,75 +509,75 @@ _match_major ( const char *path_name, const char *major_type )
 
 int
 plugrack_read_cache( plugrack_t rack,
-                     const char *cache_file )
+		     const char *cache_file )
 {
-        /* Don't care for now. */
+	/* Don't care for now. */
 
-        return SLURM_ERROR;
+	return SLURM_ERROR;
 }
 
 
 int
 plugrack_purge_idle( plugrack_t rack )
 {
-        ListIterator it;
-        plugrack_entry_t *e;
+	ListIterator it;
+	plugrack_entry_t *e;
 
-        if ( ! rack ) return SLURM_ERROR;
+	if ( ! rack )
+		return SLURM_ERROR;
 
-        it = list_iterator_create( rack->entries );
-        while ( ( e = list_next( it ) ) != NULL ) {
-                if ( ( e->plug != PLUGIN_INVALID_HANDLE ) &&
+	it = list_iterator_create( rack->entries );
+	while ( ( e = list_next( it ) ) != NULL ) {
+		if ( ( e->plug != PLUGIN_INVALID_HANDLE ) &&
 		     ( e->refcount == 0 ) ){
-                        plugin_unload( e->plug );
-                        e->plug = PLUGIN_INVALID_HANDLE;
-                }
-        }
+			plugin_unload( e->plug );
+			e->plug = PLUGIN_INVALID_HANDLE;
+		}
+	}
 
-        list_iterator_destroy( it );
-        return SLURM_SUCCESS;
+	list_iterator_destroy( it );
+	return SLURM_SUCCESS;
 }
 
 
 int
 plugrack_load_all( plugrack_t rack )
 {
-        ListIterator it;
-        plugrack_entry_t *e;
+	ListIterator it;
+	plugrack_entry_t *e;
 
-        if ( ! rack ) return SLURM_ERROR;
+	if ( ! rack )
+		return SLURM_ERROR;
 
-        it = list_iterator_create( rack->entries );
-        while ( ( e = list_next( it ) ) != NULL ) {
-                if ( e->plug == PLUGIN_INVALID_HANDLE ) {
-                        plugin_load_from_file(&e->plug, e->fq_path);
-                }
-        }
+	it = list_iterator_create( rack->entries );
+	while ( ( e = list_next( it ) ) != NULL ) {
+		if ( e->plug == PLUGIN_INVALID_HANDLE ) {
+			plugin_load_from_file(&e->plug, e->fq_path);
+		}
+	}
 
-        list_iterator_destroy( it );
-        return SLURM_SUCCESS;
+	list_iterator_destroy( it );
+	return SLURM_SUCCESS;
 }
 
 
 int
 plugrack_write_cache( plugrack_t rack,
-                      const char *cache )
+		      const char *cache )
 {
-        /* Not implemented. */
+	/* Not implemented. */
 
-        return SLURM_SUCCESS;
+	return SLURM_SUCCESS;
 }
 
 plugin_handle_t
 plugrack_use_by_type( plugrack_t rack,
-                      const char *full_type )
+		      const char *full_type )
 {
 	ListIterator it;
 	plugrack_entry_t *e;
 
-	if (!rack)
-		return PLUGIN_INVALID_HANDLE;
-	if (!full_type)
+	if ( (!rack) || (!full_type) )
 		return PLUGIN_INVALID_HANDLE;
 
 	it = list_iterator_create(rack->entries);
@@ -611,39 +613,43 @@ plugrack_use_by_type( plugrack_t rack,
 int
 plugrack_finished_with_plugin( plugrack_t rack, plugin_handle_t plug )
 {
-        ListIterator it;
-        plugrack_entry_t *e;
+	ListIterator it;
+	plugrack_entry_t *e;
 
-        if ( ! rack ) return SLURM_ERROR;
+	if ( ! rack )
+		return SLURM_ERROR;
 
-        it = list_iterator_create( rack->entries );
-        while ( ( e = list_next( it ) ) != NULL ) {
-                if ( e->plug == plug ) {
-                        e->refcount--;
-                        if ( e->refcount < 0 ) e->refcount = 0;
+	it = list_iterator_create( rack->entries );
+	while ( ( e = list_next( it ) ) != NULL ) {
+		if ( e->plug == plug ) {
+			e->refcount--;
+			if ( e->refcount < 0 )
+				e->refcount = 0;
 
-                        /* Do something here with purge policy. */
+			/* Do something here with purge policy. */
 
-                        list_iterator_destroy( it );
-                        return SLURM_SUCCESS;
-                }
-        }
+			list_iterator_destroy( it );
+			return SLURM_SUCCESS;
+		}
+	}
 
-        /* Plugin not in this rack. */
-        list_iterator_destroy( it );
-        return SLURM_ERROR;
+	/* Plugin not in this rack. */
+	list_iterator_destroy( it );
+	return SLURM_ERROR;
 }
 
 int
 plugrack_print_all_plugin(plugrack_t rack)
 {
 	ListIterator itr;
-        plugrack_entry_t *e = NULL;
+	plugrack_entry_t *e = NULL;
+
 	itr = list_iterator_create(rack->entries);
 	info("MPI types are...");
 	while ((e = list_next(itr)) != NULL ) {
 		info("%s",e->full_type);
 	}
 	list_iterator_destroy(itr);
+
 	return SLURM_SUCCESS;
 }
diff --git a/src/common/plugstack.c b/src/common/plugstack.c
index cd99c5abdd38879febdb46f5c90240f565296ce8..b4a53dbcbfa6b2906bc5e6b7e1b5f2b35c9a8435 100644
--- a/src/common/plugstack.c
+++ b/src/common/plugstack.c
@@ -371,8 +371,11 @@ _spank_stack_process_line(const char *file, int line, char *buf, List *stackp)
 		return (0);
 	}
 
-	if (type == CF_INCLUDE)
-		return (_spank_conf_include (file, line, path, stackp));
+	if (type == CF_INCLUDE) {
+		int rc = _spank_conf_include (file, line, path, stackp);
+		xfree (path);	/* plug leak: path was xstrdup'd above */
+		return (rc);
+	}
 
 	if (path == NULL)	/* No plugin listed on this line */
 		return (0);
diff --git a/src/common/proc_args.c b/src/common/proc_args.c
index 04bc3448b8c28195b77a5061f98dfda34236f7c7..55781b9a49857320b7ec2565712b4beee3942719 100644
--- a/src/common/proc_args.c
+++ b/src/common/proc_args.c
@@ -536,7 +536,8 @@ bool verify_socket_core_thread_count(const char *arg, int *min_sockets,
  * RET true if valid
  */
 bool verify_hint(const char *arg, int *min_sockets, int *min_cores,
-		 int *min_threads, cpu_bind_type_t *cpu_bind_type)
+		 int *min_threads, int *ntasks_per_core,
+		 cpu_bind_type_t *cpu_bind_type)
 {
 	char *buf, *p, *tok;
 	if (!arg) {
@@ -577,6 +578,8 @@ bool verify_hint(const char *arg, int *min_sockets, int *min_cores,
 		} else if (strcasecmp(tok, "nomultithread") == 0) {
 		        *min_threads = 1;
 			*cpu_bind_type |= CPU_BIND_TO_THREADS;
+			if (*ntasks_per_core == NO_VAL)
+				*ntasks_per_core = 1;
 		} else {
 			error("unrecognized --hint argument \"%s\", "
 			      "see --hint=help", tok);
diff --git a/src/common/proc_args.h b/src/common/proc_args.h
index a2611e574c67e5aff11da3b586cf024eae27eb5b..2e75bd816325ac612664ce2160f459d4721f6536 100644
--- a/src/common/proc_args.h
+++ b/src/common/proc_args.h
@@ -108,7 +108,8 @@ bool verify_socket_core_thread_count(const char *arg, int *min_sockets,
 
 /* verify a hint and convert it into the implied settings */
 bool verify_hint(const char *arg, int *min_sockets, int *min_cores,
-		 int *min_threads, cpu_bind_type_t *cpu_bind_type);
+		 int *min_threads, int *ntasks_per_core,
+		 cpu_bind_type_t *cpu_bind_type);
 
 /* parse the mail type */
 uint16_t parse_mail_type(const char *arg);
diff --git a/src/common/read_config.c b/src/common/read_config.c
index a0e43fe290044d8cf2b10705b21e03120d6d19d8..25529eae0191978221b274d5f33d53ba1cfbb67a 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -371,6 +371,13 @@ static void _set_node_prefix(const char *nodenames)
 		       && nodenames[i-1] >= '0'))
 			break;
 	}
+
+	if(i == 1) {
+		error("In your Node definition in your slurm.conf you "
+		      "gave a nodelist '%s' without a prefix.  "
+		      "Please try something like bg%s.", nodenames, nodenames);
+	}
+
 	xfree(conf_ptr->node_prefix);
 	if(nodenames[i] == '\0')
 		conf_ptr->node_prefix = xstrdup(nodenames);
@@ -1803,6 +1810,16 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->batch_start_timeout = DEFAULT_BATCH_START_TIMEOUT;
 
 	s_p_get_string(&conf->cluster_name, "ClusterName", hashtbl);
+	/* Some databases are case sensitive, so lower-case the
+	   cluster name here to match sacctmgr, which always stores
+	   cluster names in lower case.
+	*/
+	if(conf->cluster_name) {
+		int i;
+		for (i = 0; conf->cluster_name[i] != '\0'; i++)
+			conf->cluster_name[i] =
+				(char)tolower(conf->cluster_name[i]);
+	}
 
 	if (!s_p_get_uint16(&conf->complete_wait, "CompleteWait", hashtbl))
 		conf->complete_wait = DEFAULT_COMPLETE_WAIT;
@@ -2256,7 +2273,7 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 #ifdef HAVE_BG
 	if ((conf->preempt_mode & PREEMPT_MODE_GANG) ||
 	    (conf->preempt_mode & PREEMPT_MODE_SUSPEND))
-		fatal("PreemptMode incompatable with BlueGene systems");
+		fatal("PreemptMode incompatible with BlueGene systems");
 #endif
 
 	if (s_p_get_string(&temp_str, "PriorityDecayHalfLife", hashtbl)) {
diff --git a/src/common/slurm_jobacct_gather.c b/src/common/slurm_jobacct_gather.c
index 94be48631cdf908bb07f6b95ce01443c390827d3..4a64de26c1b5220cc7a249ac32fecc2892e2c97f 100644
--- a/src/common/slurm_jobacct_gather.c
+++ b/src/common/slurm_jobacct_gather.c
@@ -75,8 +75,8 @@ typedef struct slurm_jobacct_gather_ops {
 	int (*jobacct_gather_getinfo)        (jobacctinfo_t *jobacct,
 					      enum jobacct_data_type type,
 					      void *data);
-	void (*jobacct_gather_pack)   (jobacctinfo_t *jobacct, Buf buffer);
-	int (*jobacct_gather_unpack)  (jobacctinfo_t **jobacct, Buf buffer);
+	void (*jobacct_gather_pack)   (jobacctinfo_t *jobacct, uint16_t rpc_version, Buf buffer);
+	int (*jobacct_gather_unpack)  (jobacctinfo_t **jobacct, uint16_t rpc_version, Buf buffer);
 	void (*jobacct_gather_aggregate)     (jobacctinfo_t *dest,
 					      jobacctinfo_t *from);
 	int (*jobacct_gather_startpoll)      (uint16_t frequency);
@@ -362,7 +362,8 @@ extern int jobacct_gather_g_getinfo(jobacctinfo_t *jobacct,
 	return retval;
 }
 
-extern void jobacct_gather_g_pack(jobacctinfo_t *jobacct, Buf buffer)
+extern void jobacct_gather_g_pack(jobacctinfo_t *jobacct,
+				  uint16_t rpc_version, Buf buffer)
 {
 	if (_slurm_jobacct_gather_init() < 0)
 		return;
@@ -370,12 +371,13 @@ extern void jobacct_gather_g_pack(jobacctinfo_t *jobacct, Buf buffer)
 	slurm_mutex_lock( &g_jobacct_gather_context_lock );
 	if ( g_jobacct_gather_context )
 		(*(g_jobacct_gather_context->ops.jobacct_gather_pack))
-			(jobacct, buffer);
+			(jobacct, rpc_version, buffer);
 	slurm_mutex_unlock( &g_jobacct_gather_context_lock );
 	return;
 }
 
-extern int jobacct_gather_g_unpack(jobacctinfo_t **jobacct, Buf buffer)
+extern int jobacct_gather_g_unpack(jobacctinfo_t **jobacct,
+				   uint16_t rpc_version, Buf buffer)
 {
 	int retval = SLURM_SUCCESS;
 
@@ -385,7 +387,8 @@ extern int jobacct_gather_g_unpack(jobacctinfo_t **jobacct, Buf buffer)
 	slurm_mutex_lock( &g_jobacct_gather_context_lock );
 	if ( g_jobacct_gather_context )
 		retval = (*(g_jobacct_gather_context->
-			    ops.jobacct_gather_unpack))(jobacct, buffer);
+			    ops.jobacct_gather_unpack))
+			(jobacct, rpc_version, buffer);
 	slurm_mutex_unlock( &g_jobacct_gather_context_lock );
 	return retval;
 }
diff --git a/src/common/slurm_jobacct_gather.h b/src/common/slurm_jobacct_gather.h
index dac9b4cbcde067a1da991bb2fe36b007ce3683ea..07471cde69edb4d7378c248a5162c7097eb569bc 100644
--- a/src/common/slurm_jobacct_gather.h
+++ b/src/common/slurm_jobacct_gather.h
@@ -86,8 +86,10 @@ extern int jobacct_gather_g_setinfo(jobacctinfo_t *jobacct,
 				    enum jobacct_data_type type, void *data);
 extern int jobacct_gather_g_getinfo(jobacctinfo_t *jobacct,
 				    enum jobacct_data_type type, void *data);
-extern void jobacct_gather_g_pack(jobacctinfo_t *jobacct, Buf buffer);
-extern int jobacct_gather_g_unpack(jobacctinfo_t **jobacct, Buf buffer);
+extern void jobacct_gather_g_pack(jobacctinfo_t *jobacct,
+				  uint16_t rpc_version, Buf buffer);
+extern int jobacct_gather_g_unpack(jobacctinfo_t **jobacct,
+				   uint16_t rpc_version, Buf buffer);
 
 extern void jobacct_gather_g_aggregate(jobacctinfo_t *dest,
 				       jobacctinfo_t *from);
diff --git a/src/common/slurm_protocol_defs.h b/src/common/slurm_protocol_defs.h
index 7002f1596b8e4b10ec7aad33ad9d31a2df45c19a..7df7c7f49ec865585bfc43976a3ec6243a6ebdd6 100644
--- a/src/common/slurm_protocol_defs.h
+++ b/src/common/slurm_protocol_defs.h
@@ -1010,8 +1010,10 @@ void slurm_free_job_step_info_response_msg(
 		job_step_info_response_msg_t * msg);
 void slurm_free_node_info_msg(node_info_msg_t * msg);
 void slurm_free_partition_info_msg(partition_info_msg_t * msg);
+void slurm_free_reservation_info_msg(reserve_info_msg_t * msg);
 void slurm_free_get_kvs_msg(kvs_get_msg_t *msg);
 void slurm_free_will_run_response_msg(will_run_response_msg_t *msg);
+void slurm_free_topo_info_msg(topo_info_response_msg_t *msg);
 void inline slurm_free_file_bcast_msg(file_bcast_msg_t *msg);
 void inline slurm_free_step_complete_msg(step_complete_msg_t *msg);
 void inline slurm_free_stat_jobacct_msg(stat_jobacct_msg_t *msg);
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index d95c13ca551714b055dc1cfe5cfe1f6764de8401..6dfefb67f05e7210d42c162752e08bb1bc93211a 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -4237,7 +4237,8 @@ _pack_stat_jobacct_msg(stat_jobacct_msg_t * msg, Buf buffer)
 	pack32((uint32_t)msg->return_code, buffer);
 	pack32((uint32_t)msg->step_id, buffer);
 	pack32((uint32_t)msg->num_tasks, buffer);
-	jobacct_gather_g_pack(msg->jobacct, buffer);
+	jobacct_gather_g_pack(msg->jobacct,
+			      SLURMDBD_VERSION, buffer);
 }
 
 
@@ -4253,7 +4254,9 @@ _unpack_stat_jobacct_msg(stat_jobacct_msg_t ** msg_ptr, Buf buffer)
 	safe_unpack32(&msg->return_code, buffer);
 	safe_unpack32(&msg->step_id, buffer);
 	safe_unpack32(&msg->num_tasks, buffer);
-	if (jobacct_gather_g_unpack(&msg->jobacct, buffer) != SLURM_SUCCESS)
+	if (jobacct_gather_g_unpack(&msg->jobacct,
+				    SLURMDBD_VERSION, buffer)
+	    != SLURM_SUCCESS)
 		goto unpack_error;
 
 	return SLURM_SUCCESS;
@@ -4302,7 +4305,8 @@ _pack_step_complete_msg(step_complete_msg_t * msg, Buf buffer)
 	pack32((uint32_t)msg->range_first, buffer);
 	pack32((uint32_t)msg->range_last, buffer);
 	pack32((uint32_t)msg->step_rc, buffer);
-	jobacct_gather_g_pack(msg->jobacct, buffer);
+	jobacct_gather_g_pack(
+		msg->jobacct, SLURMDBD_VERSION, buffer);
 }
 
 static int
@@ -4318,7 +4322,9 @@ _unpack_step_complete_msg(step_complete_msg_t ** msg_ptr, Buf buffer)
 	safe_unpack32(&msg->range_first, buffer);
 	safe_unpack32(&msg->range_last, buffer);
 	safe_unpack32(&msg->step_rc, buffer);
-	if (jobacct_gather_g_unpack(&msg->jobacct, buffer) != SLURM_SUCCESS)
+	if (jobacct_gather_g_unpack(&msg->jobacct,
+				    SLURMDBD_VERSION, buffer)
+	    != SLURM_SUCCESS)
 		goto unpack_error;
 
 	return SLURM_SUCCESS;
diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c
index 5c6f7951c57ea022f7a12e7bb8a26dd670085c64..e188c8228732638befe6cb97ed3eb3208b9f415b 100644
--- a/src/common/slurmdbd_defs.c
+++ b/src/common/slurmdbd_defs.c
@@ -3508,7 +3508,7 @@ slurmdbd_pack_step_complete_msg(uint16_t rpc_version,
 	pack32(msg->db_index, buffer);
 	pack_time(msg->end_time, buffer);
 	pack32(msg->exit_code, buffer);
-	jobacct_common_pack((struct jobacctinfo *)msg->jobacct, buffer);
+	jobacct_common_pack((struct jobacctinfo *)msg->jobacct, rpc_version, buffer);
 	pack32(msg->job_id, buffer);
 	pack32(msg->req_uid, buffer);
 	pack_time(msg->start_time, buffer);
@@ -3527,7 +3527,7 @@ slurmdbd_unpack_step_complete_msg(uint16_t rpc_version,
 	safe_unpack32(&msg_ptr->db_index, buffer);
 	safe_unpack_time(&msg_ptr->end_time, buffer);
 	safe_unpack32(&msg_ptr->exit_code, buffer);
-	jobacct_common_unpack((struct jobacctinfo **)&msg_ptr->jobacct, buffer);
+	jobacct_common_unpack((struct jobacctinfo **)&msg_ptr->jobacct, rpc_version, buffer);
 	safe_unpack32(&msg_ptr->job_id, buffer);
 	safe_unpack32(&msg_ptr->req_uid, buffer);
 	safe_unpack_time(&msg_ptr->start_time, buffer);
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index 2d1cb54e2f6d99dad15a4ae7495115139f2ce8b4..7b7810f7250b69d4e39bcfbfb5aff84265cc3a05 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -321,9 +321,9 @@ static uint32_t _get_wckeyid(mysql_conn_t *mysql_conn, char **name,
 			wckey_ptr->user = xstrdup(user);
 			wckey_ptr->cluster = xstrdup(cluster);
 			list_append(wckey_list, wckey_ptr);
-/* 			info("adding wckey '%s' '%s' '%s'", */
-/* 				     wckey_ptr->name, wckey_ptr->user, */
-/* 				     wckey_ptr->cluster); */
+			/* info("adding wckey '%s' '%s' '%s'", */
+			/* 	     wckey_ptr->name, wckey_ptr->user, */
+			/* 	     wckey_ptr->cluster); */
 			/* we have already checked to make
 			   sure this was the slurm user before
 			   calling this */
@@ -341,7 +341,7 @@ static uint32_t _get_wckeyid(mysql_conn_t *mysql_conn, char **name,
 			list_destroy(wckey_list);
 		}
 		xfree(user);
-		//info("got wckeyid of %d", wckey_rec.id);
+		/* info("got wckeyid of %d", wckey_rec.id); */
 		wckeyid = wckey_rec.id;
 	}
 no_wckeyid:
@@ -1783,7 +1783,7 @@ static int _addto_update_list(List update_list, acct_update_type_t type,
 /* This should take care of all the lft and rgts when you move an
  * account.  This handles deleted associations also.
  */
-static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
+static int _move_account(mysql_conn_t *mysql_conn, uint32_t *lft, uint32_t *rgt,
 			 char *cluster,
 			 char *id, char *parent, time_t now)
 {
@@ -1813,7 +1813,7 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 	par_left = atoi(row[0]);
 	mysql_free_result(result);
 
-	diff = ((par_left + 1) - lft);
+	diff = ((par_left + 1) - *lft);
 
 	if(diff == 0) {
 		debug3("Trying to move association to the same position?  "
@@ -1821,7 +1821,7 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 		return ESLURM_SAME_PARENT_ACCOUNT;
 	}
 
-	width = (rgt - lft + 1);
+	width = (*rgt - *lft + 1);
 
 	/* every thing below needs to be a %d not a %u because we are
 	   looking for -1 */
@@ -1829,7 +1829,7 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 		   "update %s set mod_time=%d, deleted = deleted + 2, "
 		   "lft = lft + %d, rgt = rgt + %d "
 		   "WHERE lft BETWEEN %d AND %d;",
-		   assoc_table, now, diff, diff, lft, rgt);
+		   assoc_table, now, diff, diff, *lft, *rgt);
 
 	xstrfmtcat(query,
 		   "UPDATE %s SET mod_time=%d, rgt = rgt + %d WHERE "
@@ -1849,11 +1849,11 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 		   "(%d < 0 && lft > %d && deleted < 2) "
 		   "|| (%d > 0 && lft > %d);",
 		   assoc_table, now, width,
-		   diff, rgt,
-		   diff, lft,
+		   diff, *rgt,
+		   diff, *lft,
 		   assoc_table, now, width,
-		   diff, rgt,
-		   diff, lft);
+		   diff, *rgt,
+		   diff, *lft);
 
 	xstrfmtcat(query,
 		   "update %s set mod_time=%d, "
@@ -1863,9 +1863,23 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 		   "update %s set mod_time=%d, "
 		   "parent_acct=\"%s\" where id = %s;",
 		   assoc_table, now, parent, id);
+	/* get the new lft and rgt if changed */
+	xstrfmtcat(query,
+		   "select lft, rgt from %s where id = %s",
+		   assoc_table, id);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
-	rc = mysql_db_query(mysql_conn->db_conn, query);
+	if(!(result = mysql_db_query_ret(mysql_conn->db_conn, query, 1))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
 	xfree(query);
+	if((row = mysql_fetch_row(result))) {
+		debug4("lft and rgt were %u %u and now is %s %s",
+		       *lft, *rgt, row[0], row[1]);
+		*lft = atoi(row[0]);
+		*rgt = atoi(row[1]);
+	}
+	mysql_free_result(result);
 
 	return rc;
 }
@@ -1876,7 +1890,7 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
  * of current parent, and parent to be child of child.)
  */
 static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
-			uint32_t lft, uint32_t rgt,
+			uint32_t *lft, uint32_t *rgt,
 			char *cluster,
 			char *id, char *old_parent, char *new_parent,
 			time_t now)
@@ -1893,7 +1907,7 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 	query = xstrdup_printf(
 		"select id, lft, rgt from %s where lft between %d and %d "
 		"&& acct=\"%s\" && user='' order by lft;",
-		assoc_table, lft, rgt,
+		assoc_table, *lft, *rgt,
 		new_parent);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	if(!(result =
@@ -1904,9 +1918,11 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 	xfree(query);
 
 	if((row = mysql_fetch_row(result))) {
+		uint32_t child_lft = atoi(row[1]), child_rgt = atoi(row[2]);
+
 		debug4("%s(%s) %s,%s is a child of %s",
 		       new_parent, row[0], row[1], row[2], id);
-		rc = _move_account(mysql_conn, atoi(row[1]), atoi(row[2]),
+		rc = _move_account(mysql_conn, &child_lft, &child_rgt,
 				   cluster, row[0], old_parent, now);
 	}
 
@@ -1931,7 +1947,9 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 	xfree(query);
 
 	if((row = mysql_fetch_row(result))) {
-		rc = _move_account(mysql_conn, atoi(row[0]), atoi(row[1]),
+		*lft = atoi(row[0]);
+		*rgt = atoi(row[1]);
+		rc = _move_account(mysql_conn, lft, rgt,
 				   cluster, id, new_parent, now);
 	} else {
 		error("can't find parent? we were able to a second ago.");
@@ -2097,7 +2115,6 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 
 		mod_assoc = xmalloc(sizeof(acct_association_rec_t));
 		init_acct_association_rec(mod_assoc);
-
 		mod_assoc->id = atoi(row[ASSOC_ID]);
 
 		if(!row[ASSOC_MJ] && assoc->max_jobs != NO_VAL) {
@@ -4567,6 +4584,9 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 			xfree(extra);
 			continue;
 		} else {
+			uint32_t lft = atoi(row[MASSOC_LFT]);
+			uint32_t rgt = atoi(row[MASSOC_RGT]);
+
 			/* If it was once deleted we have kept the lft
+			 * and rgt's constant while it was deleted and
 			 * so we can just unset the deleted flag,
@@ -4579,8 +4599,7 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 
 				/* We need to move the parent! */
 				if(_move_parent(mysql_conn, uid,
-						atoi(row[MASSOC_LFT]),
-						atoi(row[MASSOC_RGT]),
+						&lft, &rgt,
 						object->cluster,
 						row[MASSOC_ID],
 						row[MASSOC_PACCT],
@@ -4589,8 +4608,8 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 					continue;
 				moved_parent = 1;
 			} else {
-				object->lft = atoi(row[MASSOC_LFT]);
-				object->rgt = atoi(row[MASSOC_RGT]);
+				object->lft = lft;
+				object->rgt = rgt;
 			}
 
 			affect_rows = 2;
@@ -5412,20 +5431,11 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		return NULL;
 	}
 
-	/* Set here is used to ask for jobs and nodes in anything
-	 * other than up state, so it you reset it later make sure
-	 * this is accounted for before you do
-	 */
-	set = 1;
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
 	while((row = mysql_fetch_row(result))) {
 		object = xstrdup(row[0]);
 
-		/* check to see if this is the first time to register */
-		if(clust_reg && (row[1][0] == '0'))
-			set = 0;
-
 		list_append(ret_list, object);
 		if(!rc) {
 			xstrfmtcat(name_char, "name='%s'", object);
@@ -5459,50 +5469,6 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		}
 	}
 
-	/* Get all nodes in a down state and jobs pending or running.
-	 * This is for the first time a cluster registers
-	 */
-
-	if(!set && slurmdbd_conf) {
-		/* This only happens here with the slurmdbd.  If
-		 * calling this plugin directly we do this in
-		 * clusteracct_storage_p_cluster_procs.
-		 */
-		slurm_addr ctld_address;
-		slurm_fd fd;
-
-		info("First time to register cluster requesting "
-		     "running jobs and system information.");
-
-		slurm_set_addr_char(&ctld_address, cluster->control_port,
-				    cluster->control_host);
-		fd = slurm_open_msg_conn(&ctld_address);
-		if (fd < 0) {
-			error("can not open socket back to slurmctld "
-			      "%s(%u): %m", cluster->control_host,
-			      cluster->control_port);
-		} else {
-			slurm_msg_t out_msg;
-			accounting_update_msg_t update;
-			/* We have to put this update message here so
-			   we can tell the sender to send the correct
-			   RPC version.
-			*/
-			memset(&update, 0, sizeof(accounting_update_msg_t));
-			update.rpc_version = cluster->rpc_version;
-			slurm_msg_t_init(&out_msg);
-			out_msg.msg_type = ACCOUNTING_FIRST_REG;
-			out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
-			out_msg.data = &update;
-			slurm_send_node_msg(fd, &out_msg);
-			/* We probably need to add matching recv_msg function
-			 * for an arbitray fd or should these be fire
-			 * and forget?  For this, that we can probably
-			 * forget about it */
-			slurm_close_stream(fd);
-		}
-	}
-
 end_it:
 	xfree(name_char);
 	xfree(vals);
@@ -5655,6 +5621,12 @@ extern List acct_storage_p_modify_associations(
 	while((row = mysql_fetch_row(result))) {
 		acct_association_rec_t *mod_assoc = NULL;
 		int account_type=0;
+		/* If parent changes these also could change
+		   so we need to keep track of the latest
+		   ones.
+		*/
+		uint32_t lft = atoi(row[MASSOC_LFT]);
+		uint32_t rgt = atoi(row[MASSOC_RGT]);
 
 		if(!is_admin) {
 			acct_coord_rec_t *coord = NULL;
@@ -5743,8 +5715,7 @@ extern List acct_storage_p_modify_associations(
 					continue;
 				}
 				rc = _move_parent(mysql_conn, uid,
-						  atoi(row[MASSOC_LFT]),
-						  atoi(row[MASSOC_RGT]),
+						  &lft, &rgt,
 						  row[MASSOC_CLUSTER],
 						  row[MASSOC_ID],
 						  row[MASSOC_PACCT],
@@ -5875,8 +5846,7 @@ extern List acct_storage_p_modify_associations(
 			_modify_unset_users(mysql_conn,
 					    mod_assoc,
 					    row[MASSOC_ACCT],
-					    atoi(row[MASSOC_LFT]),
-					    atoi(row[MASSOC_RGT]),
+					    lft, rgt,
 					    ret_list,
 					    moved_parent);
 		}
@@ -8259,7 +8229,7 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	uint32_t parent_mj = INFINITE;
-        uint32_t parent_msj = INFINITE;
+	uint32_t parent_msj = INFINITE;
 	uint32_t parent_mcpj = INFINITE;
 	uint32_t parent_mnpj = INFINITE;
 	uint32_t parent_mwpj = INFINITE;
@@ -10333,17 +10303,12 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 		/* Get all nodes in a down state and jobs pending or running.
 		 * This is for the first time a cluster registers
 		 *
-		 * This only happens here when calling the plugin directly.  If
-		 * calling this plugin throught the slurmdbd we do this in
-		 * acct_storage_p_modify_clusters.
+		 * We will return ACCOUNTING_FIRST_REG so this
+		 * is taken care of since the message thread
+		 * may not be up when we run this in the controller or
+		 * in the slurmdbd.
 		 */
-		if(!slurmdbd_conf) {
-			/* We will return ACCOUNTING_FIRST_REG so this
-			   is taken care of since the message thread
-			   may not be up when we run this in the controller.
-			*/
-			first = 1;
-		}
+		first = 1;
 		goto add_it;
 	}
 
@@ -10642,8 +10607,10 @@ no_rollup_change:
 #endif
 	}
 
-	/* if there is a start_time get the wckeyid */
-	if(job_ptr->start_time && job_ptr->assoc_id)
+	/* If there is a start_time get the wckeyid.  If the job is
+	 * cancelled before the job starts we also want to grab it. */
+	if(job_ptr->assoc_id
+	   && (job_ptr->start_time || IS_JOB_CANCELLED(job_ptr)))
 		wckeyid = _get_wckeyid(mysql_conn, &job_ptr->wckey,
 				       job_ptr->user_id, cluster_name,
 				       job_ptr->assoc_id);
diff --git a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
index 59e431b9902139a0329680c2b7d62d430bfc70bd..51908c69278ced79827b04670445f28cb58eba45 100644
--- a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
+++ b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
@@ -420,14 +420,16 @@ extern int jobacct_gather_p_getinfo(struct jobacctinfo *jobacct,
 	return jobacct_common_getinfo(jobacct, type, data);
 }
 
-extern void jobacct_gather_p_pack(struct jobacctinfo *jobacct, Buf buffer)
+extern void jobacct_gather_p_pack(struct jobacctinfo *jobacct,
+				  uint16_t rpc_version, Buf buffer)
 {
-	jobacct_common_pack(jobacct, buffer);
+	jobacct_common_pack(jobacct, rpc_version, buffer);
 }
 
-extern int jobacct_gather_p_unpack(struct jobacctinfo **jobacct, Buf buffer)
+extern int jobacct_gather_p_unpack(struct jobacctinfo **jobacct,
+				   uint16_t rpc_version, Buf buffer)
 {
-	return jobacct_common_unpack(jobacct, buffer);
+	return jobacct_common_unpack(jobacct, rpc_version, buffer);
 }
 
 extern void jobacct_gather_p_aggregate(struct jobacctinfo *dest,
diff --git a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
index aa59b1e2a5fc917bedd34401d749250bb803c100..823bfe371b2c479595b50fdbd6364a69316a6823 100644
--- a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
+++ b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
@@ -533,14 +533,16 @@ extern int jobacct_gather_p_getinfo(struct jobacctinfo *jobacct,
 	return jobacct_common_getinfo(jobacct, type, data);
 }
 
-extern void jobacct_gather_p_pack(struct jobacctinfo *jobacct, Buf buffer)
+extern void jobacct_gather_p_pack(struct jobacctinfo *jobacct,
+				  uint16_t rpc_version,  Buf buffer)
 {
-	jobacct_common_pack(jobacct, buffer);
+	jobacct_common_pack(jobacct, rpc_version, buffer);
 }
 
-extern int jobacct_gather_p_unpack(struct jobacctinfo **jobacct, Buf buffer)
+extern int jobacct_gather_p_unpack(struct jobacctinfo **jobacct,
+				   uint16_t rpc_version, Buf buffer)
 {
-	return jobacct_common_unpack(jobacct, buffer);
+	return jobacct_common_unpack(jobacct, rpc_version, buffer);
 }
 
 extern void jobacct_gather_p_aggregate(struct jobacctinfo *dest,
diff --git a/src/plugins/sched/wiki/msg.c b/src/plugins/sched/wiki/msg.c
index 5d96d720dcc6cc4b096a7238f3401f1b751de2f3..b496e16d13b8f8d7cc13268ab50e1b95eb999105 100644
--- a/src/plugins/sched/wiki/msg.c
+++ b/src/plugins/sched/wiki/msg.c
@@ -61,6 +61,7 @@ uint16_t e_port = 0;
 struct   part_record *exclude_part_ptr[EXC_PART_CNT];
 struct   part_record *hide_part_ptr[HIDE_PART_CNT];
 uint16_t job_aggregation_time = 10;	/* Default value is 10 seconds */
+uint16_t host_format;
 int      init_prio_mode = PRIO_HOLD;
 uint16_t kill_wait;
 uint16_t use_host_exp = 0;
@@ -246,6 +247,7 @@ extern int parse_wiki_config(void)
 		{"EPort", S_P_UINT16},
 		{"ExcludePartitions", S_P_STRING},
 		{"HidePartitionJobs", S_P_STRING},
+		{"HostFormat", S_P_UINT16},
 		{"JobAggregationTime", S_P_UINT16},
 		{"JobPriority", S_P_STRING},
 		{NULL} };
@@ -298,7 +300,10 @@ extern int parse_wiki_config(void)
 		xfree(key);
 	}
 	s_p_get_uint16(&e_port, "EPort", tbl);
-	s_p_get_uint16(&job_aggregation_time, "JobAggregationTime", tbl);
+	if (s_p_get_uint16(&job_aggregation_time, "JobAggregationTime", tbl))
+		error("JobAggregationTime not used by sched/wiki");
+	if (s_p_get_uint16(&host_format, "HostFormat", tbl))
+		error("HostFormat not used by sched/wiki");
 
 	if (s_p_get_string(&exclude_partitions, "ExcludePartitions", tbl)) {
 		char *tok = NULL, *tok_p = NULL;
diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.c b/src/plugins/select/bluegene/plugin/bg_block_info.c
index 0ed61d34c4b2063adfa69bba82b641815aa7cbc9..cce43054034ea56c0dc351d049a25dafe2b48cdf 100644
--- a/src/plugins/select/bluegene/plugin/bg_block_info.c
+++ b/src/plugins/select/bluegene/plugin/bg_block_info.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  bg_block_info.c - bluegene block information from the db2 database.
  *
- *  $Id: bg_block_info.c 19095 2009-12-01 22:59:18Z da $
+ *  $Id: bg_block_info.c 19270 2010-01-19 19:46:45Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -268,7 +268,7 @@ extern int update_block_list()
 				switch(rc) {
 				case INCONSISTENT_DATA:
 					debug2("got inconsistent data when "
-					       "quering block %s", name);
+					       "querying block %s", name);
 					continue;
 					break;
 				case PARTITION_NOT_FOUND:
@@ -621,7 +621,7 @@ extern int update_freeing_block_list()
 				switch(rc) {
 				case INCONSISTENT_DATA:
 					debug2("got inconsistent data when "
-					       "quering block %s", name);
+					       "querying block %s", name);
 					continue;
 					break;
 				case PARTITION_NOT_FOUND:
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/plugin/bg_job_run.c
index 9eedecabe072c1020cda9d6ae7a7c3509ff600d6..5ec99adefd157195121d57de6b9d6f89ae0580be 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_run.c
@@ -2,7 +2,7 @@
  *  bg_job_run.c - blue gene job execution (e.g. initiation and termination)
  *  functions.
  *
- *  $Id: bg_job_run.c 19215 2010-01-05 19:20:58Z da $
+ *  $Id: bg_job_run.c 19292 2010-01-21 01:17:17Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -364,7 +364,7 @@ static void _remove_jobs_on_block_and_reset(rm_job_list_t *job_list,
 		debug2("looking at block %s looking for %s\n",
 		       job_block, block_id);
 
-		if (!strcmp(job_block, block_id)) {
+		if (strcmp(job_block, block_id)) {
 			free(job_block);
 			continue;
 		}
diff --git a/src/plugins/select/bluegene/plugin/block_sys.c b/src/plugins/select/bluegene/plugin/block_sys.c
index fd6e16a723da25b0c2777403c54d3d4d428840f1..267860d630f1bfe2c3039f6fd7ad7fc104dfe0c4 100755
--- a/src/plugins/select/bluegene/plugin/block_sys.c
+++ b/src/plugins/select/bluegene/plugin/block_sys.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  block_sys.c - component used for wiring up the blocks
  *
- *  $Id: block_sys.c 19173 2009-12-15 22:36:32Z da $
+ *  $Id: block_sys.c 19280 2010-01-20 17:45:13Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -1078,13 +1078,23 @@ extern int load_state_file(List curr_block_list, char *dir_name)
 	bitmap = bit_alloc(node_record_count);
 	itr = list_iterator_create(part_list);
 	while ((part_ptr = list_next(itr))) {
-		/* we only want to use bps that are in partitions
-		 */
+		/* we only want to use bps that are in partitions */
+		if(!part_ptr->node_bitmap) {
+			debug4("Partition %s doesn't have any nodes in it.",
+			       part_ptr->name);
+			continue;
+		}
 		bit_or(bitmap, part_ptr->node_bitmap);
 	}
 	list_iterator_destroy(itr);
 
 	bit_not(bitmap);
+	if(bit_ffs(bitmap) != -1) {
+		fatal("We don't have any nodes in any partitions.  "
+		      "Can't create blocks.  "
+		      "Please check your slurm.conf.");
+	}
+
 	non_usable_nodes = bitmap2node_name(bitmap);
 	FREE_NULL_BITMAP(bitmap);
 	removable_set_bps(non_usable_nodes);
diff --git a/src/plugins/select/bluegene/plugin/defined_block.c b/src/plugins/select/bluegene/plugin/defined_block.c
index a7576dd9b8cbdd071bb502577c9d4ac9a3f7cb85..6370e7b37ae13f8325de57e729b104810fb9948e 100644
--- a/src/plugins/select/bluegene/plugin/defined_block.c
+++ b/src/plugins/select/bluegene/plugin/defined_block.c
@@ -73,11 +73,21 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 		/* we only want to use bps that are in
 		 * partitions
 		 */
+		if(!part_ptr->node_bitmap) {
+			debug4("Partition %s doesn't have any nodes in it.",
+			       part_ptr->name);
+			continue;
+		}
 		bit_or(bitmap, part_ptr->node_bitmap);
 	}
 	list_iterator_destroy(itr);
 
 	bit_not(bitmap);
+	if(bit_ffs(bitmap) != -1) {
+		fatal("We don't have any nodes in any partitions.  "
+		      "Can't create blocks.  "
+		      "Please check your slurm.conf.");
+	}
 	non_usable_nodes = bitmap2node_name(bitmap);
 	removable_set_bps(non_usable_nodes);
 	FREE_NULL_BITMAP(bitmap);
@@ -290,6 +300,11 @@ extern int create_full_system_block(List bg_found_block_list)
 		/* we only want to use bps that are in
 		 * partitions
 		 */
+		if(!part_ptr->node_bitmap) {
+			debug4("Partition %s doesn't have any nodes in it.",
+			       part_ptr->name);
+			continue;
+		}
 		bit_or(bitmap, part_ptr->node_bitmap);
 	}
 	list_iterator_destroy(itr);
diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c
index b454bf8c4a8081434ee3ef9c72c7f34235ab1ea9..147fce996120f574a569a5a5449e54cf654632c4 100644
--- a/src/plugins/select/cons_res/dist_tasks.c
+++ b/src/plugins/select/cons_res/dist_tasks.c
@@ -177,6 +177,7 @@ static void _block_sync_core_bitmap(struct job_record *job_ptr,
 	uint16_t cpus, num_bits, vpus = 1;
 	job_resources_t *job_res = job_ptr->job_resrcs;
 	bool alloc_cores = false, alloc_sockets = false;
+	uint16_t ntasks_per_core = 0xffff;
 
 	if (!job_res)
 		return;
@@ -191,6 +192,9 @@ static void _block_sync_core_bitmap(struct job_record *job_ptr,
 		alloc_cores = true;
 #endif
 
+	if (job_ptr->details && job_ptr->details->mc_ptr)
+		ntasks_per_core = job_ptr->details->mc_ptr->ntasks_per_core;
+
 	size  = bit_size(job_res->node_bitmap);
 	csize = bit_size(job_res->core_bitmap);
 	for (c = 0, i = 0, n = 0; n < size; n++) {
@@ -204,7 +208,7 @@ static void _block_sync_core_bitmap(struct job_record *job_ptr,
 			fatal ("cons_res: _block_sync_core_bitmap index error");
 
 		cpus  = job_res->cpus[i];
-		vpus  = select_node_record[n].vpus;
+		vpus  = MIN(select_node_record[n].vpus, ntasks_per_core);
 
 		while ((cpus > 0) && (num_bits > 0)) {
 			if (bit_test(job_res->core_bitmap, c++)) {
@@ -256,6 +260,7 @@ static void _cyclic_sync_core_bitmap(struct job_record *job_ptr,
 	job_resources_t *job_res = job_ptr->job_resrcs;
 	bitstr_t *core_map;
 	bool *sock_used, alloc_cores = false, alloc_sockets = false;
+	uint16_t ntasks_per_core = 0xffff;
 
 	if ((job_res == NULL) || (job_res->core_bitmap == NULL))
 		return;
@@ -270,6 +275,8 @@ static void _cyclic_sync_core_bitmap(struct job_record *job_ptr,
 		alloc_cores = true;
 #endif
 	core_map = job_res->core_bitmap;
+	if (job_ptr->details && job_ptr->details->mc_ptr)
+		ntasks_per_core = job_ptr->details->mc_ptr->ntasks_per_core;
 
 	sock_size  = select_node_record[0].sockets;
 	sock_start = xmalloc(sock_size * sizeof(uint32_t));
@@ -284,7 +291,7 @@ static void _cyclic_sync_core_bitmap(struct job_record *job_ptr,
 			continue;
 		sockets = select_node_record[n].sockets;
 		cps     = select_node_record[n].cores;
-		vpus    = select_node_record[n].vpus;
+		vpus    = MIN(select_node_record[n].vpus, ntasks_per_core);
 #ifdef CR_DEBUG
 		info("DEBUG: job %u node %s vpus %u cpus %u",
 		     job_ptr->job_id, select_node_record[n].node_ptr->name,
diff --git a/src/plugins/select/cons_res/job_test.c b/src/plugins/select/cons_res/job_test.c
index 31d152fd834777042fd41221df57c2390f870f41..f6c396f0915115e3ac37e51e0e80abae70f8e9c3 100644
--- a/src/plugins/select/cons_res/job_test.c
+++ b/src/plugins/select/cons_res/job_test.c
@@ -139,10 +139,12 @@ uint16_t _allocate_sockets(struct job_record *job_ptr, bitstr_t *core_map,
 	uint16_t cores_per_socket = select_node_record[node_i].cores;
 	uint16_t threads_per_core = select_node_record[node_i].vpus;
 	uint16_t min_cores = 0, min_sockets = 0, ntasks_per_socket = 0;
+	uint16_t ntasks_per_core = 0xffff;
 
 	if (job_ptr->details && job_ptr->details->mc_ptr) {
 		min_cores   = job_ptr->details->mc_ptr->min_cores;
 		min_sockets = job_ptr->details->mc_ptr->min_sockets;
+		ntasks_per_core = job_ptr->details->mc_ptr->ntasks_per_core;
 		ntasks_per_socket = job_ptr->details->mc_ptr->ntasks_per_socket;
 	}
 
@@ -264,11 +266,12 @@ uint16_t _allocate_sockets(struct job_record *job_ptr, bitstr_t *core_map,
 	 */
 	avail_cpus = 0;
 	num_tasks = 0;
+	threads_per_core = MIN(threads_per_core, ntasks_per_core);
 	for (i = 0; i < sockets; i++) {
 		uint16_t tmp = free_cores[i] * threads_per_core;
 		avail_cpus += tmp;
 		if (ntasks_per_socket)
-			num_tasks += MIN(tmp,ntasks_per_socket);
+			num_tasks += MIN(tmp, ntasks_per_socket);
 		else
 			num_tasks += tmp;
 	}
@@ -359,10 +362,12 @@ uint16_t _allocate_cores(struct job_record *job_ptr, bitstr_t *core_map,
 	uint16_t cores_per_socket = select_node_record[node_i].cores;
 	uint16_t threads_per_core = select_node_record[node_i].vpus;
 	uint16_t min_cores = 0, min_sockets = 0;
+	uint16_t ntasks_per_core = 0xffff;
 
 	if (!cpu_type && job_ptr->details && job_ptr->details->mc_ptr) {
 		min_cores   = job_ptr->details->mc_ptr->min_cores;
 		min_sockets = job_ptr->details->mc_ptr->min_sockets;
+		ntasks_per_core = job_ptr->details->mc_ptr->ntasks_per_core;
 	}
 
 	/* These are the job parameters that we must respect:
@@ -464,6 +469,7 @@ uint16_t _allocate_cores(struct job_record *job_ptr, bitstr_t *core_map,
 	 * Note: cpus_per_task and ntasks_per_core need to play nice
 	 *       2 tasks_per_core vs. 2 cpus_per_task
 	 */
+	threads_per_core = MIN(threads_per_core, ntasks_per_core);
 	num_tasks = avail_cpus = threads_per_core;
 	i = job_ptr->details->mc_ptr->ntasks_per_core;
 	if (!cpu_type && i > 0)
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index 5a983fed5be3dfd841dbca2b6a7f1689b7e832e4..ab528990f77fbad40ac65ff6cbca63795a7d9e7a 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -174,7 +174,6 @@ static int select_node_cnt = 0;
 static bool job_preemption_enabled = false;
 static bool job_preemption_killing = false;
 static bool job_preemption_tested  = false;
-static List preempt_job_list = NULL;
 
 struct select_nodeinfo {
 	uint16_t magic;		/* magic number */
@@ -185,7 +184,6 @@ extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(uint32_t size);
 extern int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo);
 
 /* Procedure Declarations */
-static void _preempt_list_del(void *x);
 static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		    uint32_t min_nodes, uint32_t max_nodes,
 		    uint32_t req_nodes, uint16_t job_node_req,
@@ -200,7 +198,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 
 #if (CR_DEBUG)
 
-static void _dump_job_res(job_resources_t job) {
+static void _dump_job_res(struct job_resources *job) {
 	char str[64];
 
 	if (job->core_bitmap)
@@ -1201,8 +1199,7 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 			/* Build list of preemptee jobs whose resources are
 			 * actually used */
 			if (*preemptee_job_list == NULL) {
-				*preemptee_job_list = list_create(
-							_preempt_list_del);
+				*preemptee_job_list = list_create(NULL);
 				if (*preemptee_job_list == NULL)
 					fatal("list_create malloc failure");
 			}
@@ -1343,7 +1340,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		 * actually used. List returned even if not killed
 		 * in selected plugin, but by Moab or something else. */
 		if (*preemptee_job_list == NULL) {
-			*preemptee_job_list = list_create(_preempt_list_del);
+			*preemptee_job_list = list_create(NULL);
 			if (*preemptee_job_list == NULL)
 				fatal("list_create malloc failure");
 		}
@@ -1454,11 +1451,6 @@ static int _synchronize_bitmaps(struct job_record *job_ptr,
 	return SLURM_SUCCESS;
 }
 
-static void _preempt_list_del(void *x)
-{
-	xfree(x);
-}
-
 /*
  * init() is called when the plugin is loaded, before any other functions
  * are called.  Put global initialization here.
@@ -1474,8 +1466,6 @@ extern int init(void)
 	fatal("Use SelectType=select/bluegene");
 #endif
 	cr_type = (select_type_plugin_info_t)slurmctld_conf.select_type_param;
-	if (!preempt_job_list)
-		preempt_job_list = list_create(_preempt_list_del);
 	verbose("%s loaded with argument %d ", plugin_name, cr_type);
 
 	return SLURM_SUCCESS;
@@ -1492,9 +1482,6 @@ extern int fini(void)
 	xfree(cr_num_core_count);
 	cr_node_num_cores = NULL;
 	cr_num_core_count = NULL;
-	if (preempt_job_list)
-		list_destroy(preempt_job_list);
-	preempt_job_list = NULL;
 
 	verbose("%s shutting down ...", plugin_name);
 
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index 53e0bebde735b53555bb47fde3e744e70cf41999..7f3f13f305cc0b0ec47b1fd53a17ec77dbdacd73 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -129,7 +129,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 			  uint32_t min_nodes, uint32_t max_nodes,
 			  uint32_t req_nodes);
-static void _preempt_list_del(void *x);
 static bool _rem_run_job(struct part_cr_record *part_cr_ptr, uint32_t job_id);
 static int _rm_job_from_nodes(struct node_cr_record *node_cr_ptr,
 			      struct job_record *job_ptr, char *pre_err,
@@ -1750,8 +1749,7 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 			/* Build list of preemptee jobs whose resources are
 			 * actually used */
 			if (*preemptee_job_list == NULL) {
-				*preemptee_job_list = list_create(
-							_preempt_list_del);
+				*preemptee_job_list = list_create(NULL);
 				if (*preemptee_job_list == NULL)
 					fatal("list_create malloc failure");
 			}
@@ -1890,7 +1888,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		 * actually used. List returned even if not killed
 		 * in selected plugin, but by Moab or something else. */
 		if (*preemptee_job_list == NULL) {
-			*preemptee_job_list = list_create(_preempt_list_del);
+			*preemptee_job_list = list_create(NULL);
 			if (*preemptee_job_list == NULL)
 				fatal("list_create malloc failure");
 		}
@@ -1919,11 +1917,6 @@ static int  _cr_job_list_sort(void *x, void *y)
 	return (int) difftime(job1_ptr->end_time, job2_ptr->end_time);
 }
 
-static void _preempt_list_del(void *x)
-{
-	xfree(x);
-}
-
 /*
  * init() is called when the plugin is loaded, before any other functions
  * are called.  Put global initialization here.
@@ -1935,7 +1928,7 @@ extern int init ( void )
 	rc = _init_status_pthread();
 #endif
 #ifdef HAVE_BG
-	error("%s is incompatable with BlueGene", plugin_name);
+	error("%s is incompatible with BlueGene", plugin_name);
 	fatal("Use SelectType=select/bluegene");
 #endif
 	cr_type = (select_type_plugin_info_t)
diff --git a/src/sacct/options.c b/src/sacct/options.c
index b7437028dbd638b37f4d9c8cccb4c82fb02442cc..c5959b72926c9323d193f5059394b305fbcee2ba 100644
--- a/src/sacct/options.c
+++ b/src/sacct/options.c
@@ -867,9 +867,8 @@ void parse_command_line(int argc, char **argv)
 	job_cond->without_steps = params.opt_allocs;
 
 	if(!job_cond->usage_start) {
-		if(job_cond->state_list)
-			job_cond->usage_start = time(NULL);
-		else {
+		job_cond->usage_start = time(NULL);
+		if(!job_cond->state_list) {
 			struct tm start_tm;
 
 			if(!localtime_r(&job_cond->usage_start, &start_tm)) {
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index 7647b21d7a39d4d69f6383fa4fd3cc4a75e3c1f0..1bdac830313c7a7785cf02c00c27a398e85fa944 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -282,8 +282,12 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(addto_qos_char_list(assoc_cond->qos_list, g_qos_list,
 					       argv[i]+end, option))
 				a_set = 1;
-			else
+			else {
 				exit_code = 1;
+				fprintf(stderr,
+					" Bad QosLevel value: %s\n",
+					argv[i]+end);
+			}
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
@@ -489,8 +493,12 @@ static int _set_rec(int *start, int argc, char *argv[],
 			if(addto_qos_char_list(assoc->qos_list,
 					       g_qos_list, argv[i]+end, option))
 				a_set = 1;
-			else
+			else {
 				exit_code = 1;
+				fprintf(stderr,
+					" Bad QosLevel value: %s\n",
+					argv[i]+end);
+			}
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown option: %s\n"
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 996ebaca0841cb8e8de1f23e32cd2134a9e02ed2..49ddb553876b6c6dc2d638db94b003b96f7f86e7 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -859,8 +859,12 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 			if(addto_qos_char_list(start_assoc.qos_list, g_qos_list,
 					       argv[i]+end, option))
 				limit_set = 1;
-			else
+			else {
 				exit_code = 1;
+				fprintf(stderr,
+					" Bad QosLevel value: %s\n",
+					argv[i]+end);
+			}
 		} else if (!strncasecmp (argv[i], "WCKeys",
 					 MAX(command_len, 1))) {
 			slurm_addto_char_list(wckey_cond->name_list,
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index d6a305e513b054e7d7a1e892af4c36983ad4379a..5b6d23a90eaf00fe3a200be110eec97db94dedef 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -993,15 +993,6 @@ void set_options(const int argc, char **argv)
 						&opt.min_threads_per_core,
 						NULL, true );
 			break;
-		case LONG_OPT_HINT:
-			if (verify_hint(optarg,
-					&opt.min_sockets_per_node,
-					&opt.min_cores_per_socket,
-					&opt.min_threads_per_core,
-					&opt.cpu_bind_type)) {
-				exit(error_exit);
-			}
-			break;
 		case LONG_OPT_NTASKSPERNODE:
 			opt.ntasks_per_node = _get_int(optarg,
 				"ntasks-per-node");
@@ -1014,6 +1005,17 @@ void set_options(const int argc, char **argv)
 			opt.ntasks_per_core = _get_int(optarg,
 				"ntasks-per-core");
 			break;
+		case LONG_OPT_HINT:
+			/* Keep after other options filled in */
+			if (verify_hint(optarg,
+					&opt.min_sockets_per_node,
+					&opt.min_cores_per_socket,
+					&opt.min_threads_per_core,
+					&opt.ntasks_per_core,
+					&opt.cpu_bind_type)) {
+				exit(error_exit);
+			}
+			break;
 		case LONG_OPT_REBOOT:
 			opt.reboot = true;
 			break;
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index 4d297931ad0e9d5194f650251645216b9e903784..b4b75ac9349ca452cefde6123d11dba36c6076ef 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -46,6 +46,7 @@
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/param.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <time.h>
@@ -81,6 +82,7 @@
 char **command_argv;
 int command_argc;
 pid_t command_pid = -1;
+char *work_dir = NULL;
 
 enum possible_allocation_states allocation_state = NOT_GRANTED;
 pthread_mutex_t allocation_state_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -100,6 +102,7 @@ static void _signal_while_allocating(int signo);
 static void _job_complete_handler(srun_job_complete_msg_t *msg);
 static void _set_exit_code(void);
 static void _set_rlimits(char **env);
+static void _set_submit_dir_env(void);
 static void _timeout_handler(srun_timeout_msg_t *msg);
 static void _user_msg_handler(srun_user_msg_t *msg);
 static void _ping_handler(srun_ping_msg_t *msg);
@@ -163,6 +166,8 @@ int main(int argc, char *argv[])
 		error("Plugin stack post-option processing failed");
 		exit(error_exit);
 	}
+
+	_set_submit_dir_env();
 	if (opt.cwd && chdir(opt.cwd)) {
 		error("chdir(%s): %m", opt.cwd);
 		exit(error_exit);
@@ -302,7 +307,7 @@ int main(int argc, char *argv[])
 	/*
 	 * Run the user's command.
 	 */
-	if(env_array_for_job(&env, alloc, &desc) != SLURM_SUCCESS)
+	if (env_array_for_job(&env, alloc, &desc) != SLURM_SUCCESS)
 		goto relinquish;
 
 	/* Add default task count for srun, if not already set */
@@ -423,6 +428,21 @@ static void _set_exit_code(void)
 	}
 }
 
+/* Set the SLURM_SUBMIT_DIR environment variable to the current working directory */
+static void _set_submit_dir_env(void)
+{
+	work_dir = xmalloc(MAXPATHLEN + 1);
+	if ((getcwd(work_dir, MAXPATHLEN)) == NULL) {
+		error("getcwd failed: %m");
+		exit(error_exit);
+	}
+
+	if (setenvf(NULL, "SLURM_SUBMIT_DIR", "%s", work_dir) < 0) {
+		error("unable to set SLURM_SUBMIT_DIR in environment");
+		return;
+	}
+}
+
 /* Returns 0 on success, -1 on failure */
 static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 {
@@ -476,6 +496,11 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	if (opt.qos)
 		desc->qos = xstrdup(opt.qos);
 
+	if (opt.cwd)
+		desc->work_dir = xstrdup(opt.cwd);
+	else if (work_dir)
+		desc->work_dir = xstrdup(work_dir);
+
 	if (opt.hold)
 		desc->priority     = 0;
 #ifdef HAVE_BG
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index 3129061eb6a3d97946c8fbbbd88a958c12123c3d..e6cd19540eae83903e4ef04cfad57733ee8f7347 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -1435,15 +1435,6 @@ static void _set_options(int argc, char **argv)
 						&opt.min_threads_per_core,
 						NULL, true );
 			break;
-		case LONG_OPT_HINT:
-			if (verify_hint(optarg,
-					&opt.min_sockets_per_node,
-					&opt.min_cores_per_socket,
-					&opt.min_threads_per_core,
-					&opt.cpu_bind_type)) {
-				exit(error_exit);
-			}
-			break;
 		case LONG_OPT_NTASKSPERNODE:
 			opt.ntasks_per_node = _get_int(optarg,
 				"ntasks-per-node");
@@ -1462,6 +1453,17 @@ static void _set_options(int argc, char **argv)
 			setenvf(NULL, "SLURM_NTASKS_PER_CORE", "%d",
 				opt.ntasks_per_socket);
 			break;
+		case LONG_OPT_HINT:
+			/* Keep after other options filled in */
+			if (verify_hint(optarg,
+					&opt.min_sockets_per_node,
+					&opt.min_cores_per_socket,
+					&opt.min_threads_per_core,
+					&opt.ntasks_per_core,
+					&opt.cpu_bind_type)) {
+				exit(error_exit);
+			}
+			break;
 		case LONG_OPT_BLRTS_IMAGE:
 			xfree(opt.blrtsimage);
 			opt.blrtsimage = xstrdup(optarg);
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 7c543c08db30b3b6ac694b728ad2b574275ff5f2..ea723c0b3104ff30bddc0382cf23071c52c6ddb0 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -7009,7 +7009,7 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 		    (qos_ptr->grp_used_submit_jobs
 		     >= qos_ptr->grp_submit_jobs)) {
 			info("job submit for user %s(%u): "
-			     "group max submit job limit exceded %u "
+			     "group max submit job limit exceeded %u "
 			     "for qos '%s'",
 			     user_name,
 			     job_desc->user_id,
@@ -7084,7 +7084,7 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			if(used_limits && (used_limits->submit_jobs
 					   >= qos_ptr->max_submit_jobs_pu)) {
 				info("job submit for user %s(%u): "
-				     "account max submit job limit exceded %u",
+				     "account max submit job limit exceeded %u",
 				     user_name,
 				     job_desc->user_id,
 				     qos_ptr->max_submit_jobs_pu);
@@ -7461,9 +7461,9 @@ extern int update_job_wckey(char *module, struct job_record *job_ptr,
 		}
 	}
 
+	xfree(job_ptr->wckey);
 	if (wckey_rec.name && wckey_rec.name[0] != '\0') {
-		xstrfmtcat(job_ptr->name, "\"%s", wckey_rec.name);
-		job_ptr->account = xstrdup(wckey_rec.name);
+		job_ptr->wckey = xstrdup(wckey_rec.name);
 		info("%s: setting wckey to %s for job_id %u",
 		     module, wckey_rec.name, job_ptr->job_id);
 	} else {
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index c1416fdbf56fa8600bff87870bc798f56e70ca8a..bf70191f62ff37fdfe393da6369e6d346339c21c 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -808,8 +808,9 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 	if (job_ptr->details == NULL)
 		return EINVAL;
 
-	/* Clear dependencies on NULL or empty dependency input */
-	if ((new_depend == NULL) || (new_depend[0] == '\0')) {
+	/* Clear dependencies on NULL, "0", or empty dependency input */
+	if ((new_depend == NULL) || (new_depend[0] == '\0') ||
+	    ((new_depend[0] == '0') && (new_depend[1] == '\0'))) {
 		xfree(job_ptr->details->dependency);
 		if (job_ptr->details->depend_list) {
 			list_destroy(job_ptr->details->depend_list);
@@ -846,7 +847,7 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 		if ((sep_ptr == NULL) && (job_id == 0)) {
 			job_id = strtol(tok, &sep_ptr, 10);
 			if ((sep_ptr == NULL) || (sep_ptr[0] != '\0') ||
-			    (job_id < 0) || (job_id == job_ptr->job_id)) {
+			    (job_id == 0) || (job_id == job_ptr->job_id)) {
 				rc = EINVAL;
 				break;
 			}
@@ -864,6 +865,9 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 			if (!list_append(new_depend_list, dep_ptr))
 				fatal("list_append memory allocation failure");
 			break;
+		} else if (sep_ptr == NULL) {
+			rc = EINVAL;
+			break;
 		}
 
 		if      (strncasecmp(tok, "afternotok", 10) == 0)
@@ -882,7 +886,7 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 		while (rc == SLURM_SUCCESS) {
 			job_id = strtol(sep_ptr, &sep_ptr2, 10);
 			if ((sep_ptr2 == NULL) ||
-			    (job_id < 0) || (job_id == job_ptr->job_id) ||
+			    (job_id == 0) || (job_id == job_ptr->job_id) ||
 			    ((sep_ptr2[0] != '\0') && (sep_ptr2[0] != ',') &&
 			     (sep_ptr2[0] != ':'))) {
 				rc = EINVAL;
@@ -1094,15 +1098,18 @@ extern int epilog_slurmctld(struct job_record *job_ptr)
 	slurm_attr_init(&thread_attr_epilog);
 	pthread_attr_setdetachstate(&thread_attr_epilog,
 				    PTHREAD_CREATE_DETACHED);
-	while(1) {
+	while (1) {
 		rc = pthread_create(&thread_id_epilog,
 				    &thread_attr_epilog,
 				    _run_epilog, (void *) job_ptr);
-		if (rc == 0)
+		if (rc == 0) {
+			slurm_attr_destroy(&thread_attr_epilog);
 			return SLURM_SUCCESS;
+		}
 		if (errno == EAGAIN)
 			continue;
 		error("pthread_create: %m");
+		slurm_attr_destroy(&thread_attr_epilog);
 		return errno;
 	}
 }
@@ -1237,15 +1244,18 @@ extern int prolog_slurmctld(struct job_record *job_ptr)
 	slurm_attr_init(&thread_attr_prolog);
 	pthread_attr_setdetachstate(&thread_attr_prolog,
 				    PTHREAD_CREATE_DETACHED);
-	while(1) {
+	while (1) {
 		rc = pthread_create(&thread_id_prolog,
 				    &thread_attr_prolog,
 				    _run_prolog, (void *) job_ptr);
-		if (rc == 0)
+		if (rc == 0) {
+			slurm_attr_destroy(&thread_attr_prolog);
 			return SLURM_SUCCESS;
+		}
 		if (errno == EAGAIN)
 			continue;
 		error("pthread_create: %m");
+		slurm_attr_destroy(&thread_attr_prolog);
 		return errno;
 	}
 }
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 96c12e78e55453f8bfa442aa99631fa6f53dfe9e..2dbb96927c220b30501d144a58129f83ed02fca5 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -2,7 +2,7 @@
  *  step_mgr.c - manage the job step information of slurm
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -76,6 +76,7 @@ static int  _count_cpus(bitstr_t *bitmap);
 static struct step_record * _create_step_record (struct job_record *job_ptr);
 static void _dump_step_layout(struct step_record *step_ptr);
 static void _free_step_rec(struct step_record *step_ptr);
+static bool _is_mem_resv(void);
 static void _pack_ctld_job_step_info(struct step_record *step, Buf buffer);
 static bitstr_t * _pick_step_nodes (struct job_record  *job_ptr,
 				    job_step_create_request_msg_t *step_spec,
@@ -609,13 +610,14 @@ _pick_step_nodes (struct job_record  *job_ptr,
 		return NULL;
 	}
 
-	if (step_spec->mem_per_cpu) {
+	if (step_spec->mem_per_cpu && _is_mem_resv()) {
 		int node_inx = 0, usable_mem;
-		for (i=bit_ffs(job_resrcs_ptr->node_bitmap); i<node_record_count;
-		     i++) {
+		for (i=bit_ffs(job_resrcs_ptr->node_bitmap); 
+		     i<node_record_count; i++) {
 			if (!bit_test(job_resrcs_ptr->node_bitmap, i))
 				continue;
-			usable_mem = job_resrcs_ptr->memory_allocated[node_inx] -
+			usable_mem = job_resrcs_ptr->
+				     memory_allocated[node_inx] -
 				     job_resrcs_ptr->memory_used[node_inx];
 			task_cnt = usable_mem / step_spec->mem_per_cpu;
 			if (cpus_per_task > 0)
@@ -1013,7 +1015,7 @@ extern void step_alloc_lps(struct step_record *step_ptr)
 	}
 #endif
 
-	if (step_ptr->mem_per_cpu &&
+	if (step_ptr->mem_per_cpu && _is_mem_resv() &&
 	    ((job_resrcs_ptr->memory_allocated == NULL) ||
 	     (job_resrcs_ptr->memory_used == NULL))) {
 		error("step_alloc_lps: lack memory allocation details "
@@ -1035,7 +1037,7 @@ extern void step_alloc_lps(struct step_record *step_ptr)
 		cpus_alloc = step_ptr->step_layout->tasks[step_node_inx] *
 			     step_ptr->cpus_per_task;
 		job_resrcs_ptr->cpus_used[job_node_inx] += cpus_alloc;
-		if (step_ptr->mem_per_cpu) {
+		if (step_ptr->mem_per_cpu && _is_mem_resv()) {
 			job_resrcs_ptr->memory_used[job_node_inx] +=
 				(step_ptr->mem_per_cpu * cpus_alloc);
 		}
@@ -1074,7 +1076,7 @@ static void _dump_step_layout(struct step_record *step_ptr)
 
 	info("====================");
 	info("step_id:%u.%u", job_ptr->job_id, step_ptr->step_id);
-	for (i=0, bit_inx= 0, node_inx=0; node_inx<job_resrcs_ptr->nhosts; i++) {
+	for (i=0, bit_inx=0, node_inx=0; node_inx<job_resrcs_ptr->nhosts; i++) {
 		for (rep=0; rep<job_resrcs_ptr->sock_core_rep_count[i]; rep++) {
 			for (sock_inx=0;
 			     sock_inx<job_resrcs_ptr->sockets_per_node[i];
@@ -1118,7 +1120,7 @@ static void _step_dealloc_lps(struct step_record *step_ptr)
 	if (i_first == -1)	/* empty bitmap */
 		return;
 
-	if (step_ptr->mem_per_cpu &&
+	if (step_ptr->mem_per_cpu && _is_mem_resv() &&
 	    ((job_resrcs_ptr->memory_allocated == NULL) ||
 	     (job_resrcs_ptr->memory_used == NULL))) {
 		error("_step_dealloc_lps: lack memory allocation details "
@@ -1144,7 +1146,7 @@ static void _step_dealloc_lps(struct step_record *step_ptr)
 				job_ptr->job_id, step_ptr->step_id);
 			job_resrcs_ptr->cpus_used[job_node_inx] = 0;
 		}
-		if (step_ptr->mem_per_cpu) {
+		if (step_ptr->mem_per_cpu && _is_mem_resv()) {
 			uint32_t mem_use = step_ptr->mem_per_cpu * cpus_alloc;
 			if (job_resrcs_ptr->memory_used[job_node_inx] >= mem_use) {
 				job_resrcs_ptr->memory_used[job_node_inx] -=
@@ -1490,7 +1492,7 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 	xassert(job_resrcs_ptr->cpus);
 	xassert(job_resrcs_ptr->cpus_used);
 
-	if (step_ptr->mem_per_cpu &&
+	if (step_ptr->mem_per_cpu && _is_mem_resv() &&
 	    ((job_resrcs_ptr->memory_allocated == NULL) ||
 	     (job_resrcs_ptr->memory_used == NULL))) {
 		error("step_layout_create: lack memory allocation details "
@@ -1515,7 +1517,7 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 					      job_resrcs_ptr->cpus_used[pos];
 			} else
 				usable_cpus = job_resrcs_ptr->cpus[pos];
-			if (step_ptr->mem_per_cpu) {
+			if (step_ptr->mem_per_cpu && _is_mem_resv()) {
 				usable_mem =
 					job_resrcs_ptr->memory_allocated[pos]-
 					job_resrcs_ptr->memory_used[pos];
@@ -1688,7 +1690,7 @@ extern int pack_ctld_job_step_info_response_msg(
 	}
 	list_iterator_destroy(job_iterator);
 
-	if(list_count(job_list) && !valid_job && !steps_packed)
+	if (list_count(job_list) && !valid_job && !steps_packed)
 		error_code = ESLURM_INVALID_JOB_ID;
 
 	part_filter_clear();
@@ -1968,7 +1970,7 @@ extern int step_partial_comp(step_complete_msg_t *req, uid_t uid,
 		return ESLURM_INVALID_JOB_ID;
 	}
 	if (step_ptr->batch_step) {
-		if(rem)
+		if (rem)
 			*rem = 0;
 		step_ptr->exit_code = req->step_rc;
 		if (max_rc)
@@ -2011,7 +2013,8 @@ extern int step_partial_comp(step_complete_msg_t *req, uid_t uid,
 		step_ptr->exit_code = MAX(step_ptr->exit_code, req->step_rc);
 	}
 
-	bit_nset(step_ptr->exit_node_bitmap, req->range_first, req->range_last);
+	bit_nset(step_ptr->exit_node_bitmap, 
+		 req->range_first, req->range_last);
 	rem_nodes = bit_clear_count(step_ptr->exit_node_bitmap);
 	if (rem)
 		*rem = rem_nodes;
@@ -2584,3 +2587,23 @@ check_job_step_time_limit (struct job_record *job_ptr, time_t now)
 
 	list_iterator_destroy (step_iterator);
 }
+
+/* Return true if memory is a reserved resource, false otherwise */
+static bool _is_mem_resv(void)
+{
+	static bool mem_resv_value  = false;
+	static bool mem_resv_tested = false;
+
+	if (!mem_resv_tested) {
+		mem_resv_tested = true;
+		slurm_ctl_conf_t *conf = slurm_conf_lock();
+		if ((conf->select_type_param == CR_MEMORY)        ||
+		    (conf->select_type_param == CR_SOCKET_MEMORY) ||
+		    (conf->select_type_param == CR_CORE_MEMORY)   ||
+		    (conf->select_type_param == CR_CPU_MEMORY))
+			mem_resv_value = true;
+		slurm_conf_unlock();
+	}
+
+	return mem_resv_value;
+}
diff --git a/src/slurmd/common/set_oomadj.c b/src/slurmd/common/set_oomadj.c
index 26d0a87cdc4ec21c46b61d59747c4725846db759..c1e82042b97196d46c852e3fa83427a928710c15 100644
--- a/src/slurmd/common/set_oomadj.c
+++ b/src/slurmd/common/set_oomadj.c
@@ -58,6 +58,7 @@ extern int set_oom_adj(int adj)
 		return -1;
 	}
 	if (snprintf(oom_adj, 16, "%d", adj) >= 16) {
+		close(fd);
 		return -1;
 	}
 	while ((write(fd, oom_adj, strlen(oom_adj)) < 0) && (errno == EINTR))
diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c
index 598d54fca8d82716a2a2fad484e5cbebf3de662a..6b6b34a08982f2d07b34c39ce578004a4df8dfa2 100644
--- a/src/smap/configure_functions.c
+++ b/src/smap/configure_functions.c
@@ -1026,8 +1026,20 @@ static int _save_allocation(char *com, List allocated_blocks)
 		xstrcat(save_string,
 			"#\n# bluegene.conf file generated by smap\n");
 		xstrcat(save_string,
-			"# See the bluegene.conf man page for more information\n");
+			"# See the bluegene.conf man page for "
+			"more information\n");
 		xstrcat(save_string, "#\n");
+#ifndef HAVE_BGL
+		xstrcat(save_string, "CnloadImage="
+			"/bgsys/drivers/ppcfloor/boot/cns,"
+			"/bgsys/drivers/ppcfloor/boot/cnk\n");
+		xstrcat(save_string, "MloaderImage="
+			"/bgsys/drivers/ppcfloor/boot/uloader\n");
+		xstrcat(save_string, "IoloadImage="
+			"/bgsys/drivers/ppcfloor/boot/cns,"
+			"/bgsys/drivers/ppcfloor/boot/linux,"
+			"/bgsys/drivers/ppcfloor/boot/ramdisk\n");
+#else
 		xstrcat(save_string, "BlrtsImage="
 		       "/bgl/BlueLight/ppcfloor/bglsys/bin/rts_hw.rts\n");
 		xstrcat(save_string, "LinuxImage="
@@ -1036,18 +1048,28 @@ static int _save_allocation(char *com, List allocated_blocks)
 		       "/bgl/BlueLight/ppcfloor/bglsys/bin/mmcs-mloader.rts\n");
 		xstrcat(save_string, "RamDiskImage="
 		       "/bgl/BlueLight/ppcfloor/bglsys/bin/ramdisk.elf\n");
+#endif
 		xstrcat(save_string, "BridgeAPILogFile="
 		       "/var/log/slurm/bridgeapi.log\n");
-		xstrcat(save_string, "Numpsets=8\n");
-		xstrcat(save_string, "BridgeAPIVerbose=0\n");
+#ifndef HAVE_BGL
+		xstrcat(save_string, "Numpsets=4 # io poor\n");
+		xstrcat(save_string, "# Numpsets=32 # io rich\n");
+#else
+		xstrcat(save_string, "Numpsets=8 # io poor\n");
+		xstrcat(save_string, "# Numpsets=64 # io rich\n");
+#endif
+		xstrcat(save_string, "BridgeAPIVerbose=2\n");
 
 		xstrfmtcat(save_string, "BasePartitionNodeCnt=%d\n",
 			   base_part_node_cnt);
 		xstrfmtcat(save_string, "NodeCardNodeCnt=%d\n",
 			   nodecard_node_cnt);
-		xstrfmtcat(save_string, "LayoutMode=%s\n", layout_mode);
-
-		xstrfmtcat(save_string, "#\n# Block Layout\n#\n");
+		if(!list_count(allocated_blocks))
+			xstrcat(save_string, "LayoutMode=DYNAMIC\n");
+		else {
+			xstrfmtcat(save_string, "LayoutMode=%s\n", layout_mode);
+			xstrfmtcat(save_string, "#\n# Block Layout\n#\n");
+		}
 		results_i = list_iterator_create(allocated_blocks);
 		while((allocated_block = list_next(results_i)) != NULL) {
 			if(allocated_block->request->conn_type == SELECT_TORUS)
diff --git a/src/smap/opts.c b/src/smap/opts.c
index d70aa2a0cfed9efd1657adcdd097a3bcbb94640e..be74612d01fb2879e78fcf05a94a7e8c940f5df2 100644
--- a/src/smap/opts.c
+++ b/src/smap/opts.c
@@ -211,7 +211,7 @@ Usage: smap [OPTIONS]\n\
                              option.  Only specify the ionode number range \n\
                              here.  Specify the node name with the -n option.\n\
                              This option is only valid on Bluegene systems,\n\
-                             and only valid when quering blocks.\n\
+                             and only valid when querying blocks.\n\
   -n, --nodes=[nodes]        only show objects with these nodes.\n\
                              If querying to the ionode level use the -I\n\
                              option in conjunction with this option.\n\
diff --git a/src/squeue/print.c b/src/squeue/print.c
index b7dd85456035c87ea9cf79a40922cff70168ae19..7b5465bebe7045bbfb8b2d4b9270acae69f78136 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -590,20 +590,16 @@ int _print_job_nodes(job_info_t * job, int width, bool right, char* suffix)
 int _print_job_reason_list(job_info_t * job, int width, bool right,
 		char* suffix)
 {
-	uint16_t base_state = 0;
-
-	if (job)
-		base_state = job->job_state & JOB_STATE_BASE;
-
 	if (job == NULL) {	/* Print the Header instead */
 #ifdef HAVE_BG
 		_print_str("BP_LIST(REASON)", width, right, false);
 #else
 		_print_str("NODELIST(REASON)", width, right, false);
 #endif
-	} else if ((base_state == JOB_PENDING) ||
-	           (base_state == JOB_TIMEOUT) ||
-	           (base_state == JOB_FAILED)) {
+	} else if (!IS_JOB_COMPLETING(job)
+		   && (IS_JOB_PENDING(job)
+		       || IS_JOB_TIMEOUT(job)
+		       || IS_JOB_FAILED(job))) {
 		char id[FORMAT_STRING_SIZE], *reason;
 		if (job->state_desc)
 			reason = job->state_desc;
@@ -1338,11 +1334,10 @@ static int _filter_job(job_info_t * job)
 		if (filter == 1)
 			return 3;
 	} else {
-		uint16_t base_state = job->job_state & JOB_STATE_BASE;
-		if ((base_state != JOB_PENDING)   &&
-		    (base_state != JOB_RUNNING)   &&
-		    (base_state != JOB_SUSPENDED) &&
-		    (!(job->job_state & JOB_COMPLETING)))
+		if (!IS_JOB_PENDING(job) &&
+		    !IS_JOB_RUNNING(job) &&
+		    !IS_JOB_SUSPENDED(job) &&
+		    !IS_JOB_COMPLETING(job))
 			return 4;
 	}
 
diff --git a/src/srun/allocate.c b/src/srun/allocate.c
index d581f378367604967d2917f12ef4e84a3cf2308d..5637ca969b04a5d7b0cbeb16228c232ae8504e56 100644
--- a/src/srun/allocate.c
+++ b/src/srun/allocate.c
@@ -612,6 +612,8 @@ job_desc_msg_create_from_opts ()
 		j->comment = xstrdup(opt.comment);
 	if (opt.qos)
 		j->qos = xstrdup(opt.qos);
+	if (opt.cwd)
+		j->work_dir = xstrdup(opt.cwd);
 
 	if (opt.hold)
 		j->priority     = 0;
diff --git a/src/srun/opt.c b/src/srun/opt.c
index c8d1b0f22c2c004950525862b85bd25b0392812d..7a787e7f1d07f6b91f1bbb3d69bded4198895f52 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -1308,15 +1308,6 @@ static void set_options(const int argc, char **argv)
 						&opt.min_threads_per_core,
 						NULL, true );
 			break;
-		case LONG_OPT_HINT:
-			if (verify_hint(optarg,
-				&opt.min_sockets_per_node,
-				&opt.min_cores_per_socket,
-				&opt.min_threads_per_core,
-				&opt.cpu_bind_type)) {
-				exit(error_exit);
-			}
-			break;
 		case LONG_OPT_NTASKSPERNODE:
 			opt.ntasks_per_node = _get_int(optarg, "ntasks-per-node",
 				true);
@@ -1329,6 +1320,17 @@ static void set_options(const int argc, char **argv)
 			opt.ntasks_per_core = _get_int(optarg, "ntasks-per-core",
 				true);
 			break;
+		case LONG_OPT_HINT:
+			/* Keep after other options filled in */
+			if (verify_hint(optarg,
+					&opt.min_sockets_per_node,
+					&opt.min_cores_per_socket,
+					&opt.min_threads_per_core,
+					&opt.ntasks_per_core,
+					&opt.cpu_bind_type)) {
+				exit(error_exit);
+			}
+			break;
 		case LONG_OPT_BLRTS_IMAGE:
 			xfree(opt.blrtsimage);
 			opt.blrtsimage = xstrdup(optarg);
diff --git a/src/srun/opt.h b/src/srun/opt.h
index 621a041913e0d299039247f8e3c8c9d4c6539c71..e3762d412b41d348d1333d6a25e53108997e07c4 100644
--- a/src/srun/opt.h
+++ b/src/srun/opt.h
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  opt.h - definitions for srun option processing
- *  $Id: opt.h 19190 2009-12-30 00:20:27Z lipari $
+ *  $Id: opt.h 19275 2010-01-19 23:50:05Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
@@ -104,7 +104,7 @@ typedef struct srun_options {
 	int32_t min_threads_per_core; /* --threads-per-core=n      */
 	int32_t ntasks_per_node;   /* --ntasks-per-node=n	*/
 	int32_t ntasks_per_socket; /* --ntasks-per-socket=n	*/
-	int32_t ntasks_per_core;   /* --ntasks-per-core=n	*/
+	int ntasks_per_core;	/* --ntasks-per-core=n		*/
 	cpu_bind_type_t cpu_bind_type; /* --cpu_bind=           */
 	char *cpu_bind;		/* binding map for map/mask_cpu */
 	mem_bind_type_t mem_bind_type; /* --mem_bind=		*/
diff --git a/src/srun/srun.c b/src/srun/srun.c
index 28813c117be49bce9dfc0266716aa0840bb13f60..cce0d907910250556276e4e59b51687b90821367 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -54,6 +54,7 @@
 #  include "src/common/unsetenv.h"
 #endif
 
+#include <sys/param.h>
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/time.h>
@@ -147,6 +148,7 @@ static void  _set_exit_code(void);
 static int   _setup_signals();
 static void  _step_opt_exclusive(void);
 static void  _set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds);
+static void  _set_submit_dir_env(void);
 static void  _set_prio_process_env(void);
 static int   _set_rlimit_env(void);
 static int   _set_umask_env(void);
@@ -248,6 +250,7 @@ int srun(int ac, char **av)
 	(void) _set_rlimit_env();
 	_set_prio_process_env();
 	(void) _set_umask_env();
+	_set_submit_dir_env();
 
 	/* Set up slurmctld message handler */
 	slurmctld_msg_init();
@@ -633,6 +636,22 @@ static int _set_umask_env(void)
 	return SLURM_SUCCESS;
 }
 
+/* Set the SLURM_SUBMIT_DIR environment variable to the current working directory */
+static void _set_submit_dir_env(void)
+{
+	char buf[MAXPATHLEN + 1];
+
+	if ((getcwd(buf, MAXPATHLEN)) == NULL) {
+		error("getcwd failed: %m");
+		exit(error_exit);
+	}
+
+	if (setenvf(NULL, "SLURM_SUBMIT_DIR", "%s", buf) < 0) {
+		error("unable to set SLURM_SUBMIT_DIR in environment");
+		return;
+	}
+}
+
 /*
  * _set_prio_process_env
  *
diff --git a/src/sview/grid.c b/src/sview/grid.c
index 44c2bad10fee2a2e1e56f575c9dd0e41a4db08cd..a286c93188b88ff98b490d6207ea35e0d669b74a 100644
--- a/src/sview/grid.c
+++ b/src/sview/grid.c
@@ -1059,8 +1059,8 @@ extern int setup_grid_table(GtkTable *table, List button_list, List node_list)
 		   not needed for linear systems since they can be
 		   laid out in any fashion
 		*/
-		if (i < 4) {
-			g_error("bad node name %s\n",
+		if (i < 3) {
+			g_print("bad node name %s\n",
 				sview_node_info_ptr->node_ptr->name);
 			goto end_it;
 		} else {
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index c24c85027e190b0adc8575939ca9a8d6ab7256f1..9a9bfa6f715be521396df079bfbff000a6e49c90 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -43,6 +43,7 @@ typedef struct {
 	int color_inx;
 	job_info_t *job_ptr;
 	int node_cnt;
+	char *nodes;
 #ifdef HAVE_BG
 	bool small_block;
 #endif
@@ -671,7 +672,7 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 		type = "min cpus per node";
 		if(temp_int <= 0)
 			goto return_error;
-		job_msg->num_procs = (uint32_t)temp_int;
+		job_msg->job_min_cpus = (uint32_t)temp_int;
 		break;
 	case SORTID_TASKS:
 		temp_int = strtol(new_text, (char **)NULL, 10);
@@ -1130,7 +1131,7 @@ static void _layout_job_record(GtkTreeView *treeview,
 		suspend_secs = (time(NULL) - job_ptr->start_time) - now_time;
 		secs2time_str(now_time, running_char, sizeof(running_char));
 
-		nodes = sview_job_info_ptr->job_ptr->nodes;
+		nodes = sview_job_info_ptr->nodes;
 	}
 
 	add_display_treestore_line(update, treestore, &iter,
@@ -1608,7 +1609,7 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 		}
 		suspend_secs = (time(NULL) - job_ptr->start_time) - now_time;
 		secs2time_str(now_time, tmp_char, sizeof(tmp_char));
-		nodes = sview_job_info_ptr->job_ptr->nodes;
+		nodes = sview_job_info_ptr->nodes;
 	}
 	gtk_tree_store_set(treestore, iter, SORTID_COLOR,
 			   sview_colors[sview_job_info_ptr->color_inx], -1);
@@ -2270,6 +2271,7 @@ static void _job_info_list_del(void *object)
 	sview_job_info_t *sview_job_info = (sview_job_info_t *)object;
 
 	if (sview_job_info) {
+		xfree(sview_job_info->nodes);
 		if(sview_job_info->step_list)
 			list_destroy(sview_job_info->step_list);
 		xfree(sview_job_info);
@@ -2287,8 +2289,8 @@ static int _sview_job_sort_aval_dec(sview_job_info_t* rec_a,
 	else if (size_a > size_b)
 		return 1;
 
-	if(rec_a->job_ptr->nodes && rec_b->job_ptr->nodes) {
-		size_a = strcmp(rec_a->job_ptr->nodes, rec_b->job_ptr->nodes);
+	if(rec_a->nodes && rec_b->nodes) {
+		size_a = strcmp(rec_a->nodes, rec_b->nodes);
 		if (size_a < 0)
 			return -1;
 		else if (size_a > 0)
@@ -2350,9 +2352,11 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 		if(job_ptr->nodes && ionodes) {
 			sview_job_info_ptr->small_block = 1;
 			snprintf(tmp_char, sizeof(tmp_char), "%s[%s]",
-					 job_ptr->nodes, ionodes);
+				 job_ptr->nodes, ionodes);
 			xfree(ionodes);
-			sview_job_info_ptr->job_ptr->nodes = xstrdup(tmp_char);
+			/* keep a different string here so we don't
+			   just keep tacking on ionodes to a node list */
+			sview_job_info_ptr->nodes = xstrdup(tmp_char);
 		}
 #endif
 		if(!sview_job_info_ptr->node_cnt)
@@ -2379,8 +2383,7 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 				 */
 				xfree(step_ptr->nodes);
 				step_ptr->nodes =
-					xstrdup(sview_job_info_ptr->
-						job_ptr->nodes);
+					xstrdup(sview_job_info_ptr->nodes);
 				step_ptr->num_tasks =
 					sview_job_info_ptr->node_cnt;
 				xfree(step_ptr->node_inx);
diff --git a/src/sview/node_info.c b/src/sview/node_info.c
index 7263ed0496e4d916a2c41be169775a2b17242a0b..fb333541f30cbf1b88bdae0aa7178c846fa57b51 100644
--- a/src/sview/node_info.c
+++ b/src/sview/node_info.c
@@ -290,8 +290,11 @@ static void _update_node_record(node_info_t *node_ptr,
 	gtk_tree_store_set(treestore, iter, SORTID_ERR_CPUS,
 			   tmp_cnt, -1);
 
-	if((alloc_cpus && err_cpus)
-	   || (idle_cpus  && (idle_cpus != node_ptr->cpus))) {
+	if(IS_NODE_DRAIN(node_ptr)) {
+		/* don't worry about mixed since the
+		   whole node is being drained. */
+	} else if((alloc_cpus && err_cpus)
+		  || (idle_cpus  && (idle_cpus != node_ptr->cpus))) {
 		node_ptr->node_state &= NODE_STATE_FLAGS;
 		node_ptr->node_state |= NODE_STATE_MIXED;
 	}
@@ -603,13 +606,16 @@ extern int get_new_info_node(node_info_msg_t **info_ptr, int force)
 #endif
 			idle_cpus -= err_cpus;
 
-			if ((alloc_cpus && err_cpus) ||
+			if(IS_NODE_DRAIN(node_ptr)) {
+				/* don't worry about mixed since the
+				   whole node is being drained. */
+			} else if ((alloc_cpus && err_cpus) ||
 			    (idle_cpus  && (idle_cpus != node_ptr->cpus))) {
+				node_ptr->node_state &= NODE_STATE_FLAGS;
 				if(err_cpus)
 					node_ptr->node_state
-						|= NODE_STATE_DRAIN;
+						|= NODE_STATE_ERROR;
 
-				node_ptr->node_state &= NODE_STATE_FLAGS;
 				node_ptr->node_state |= NODE_STATE_MIXED;
 			} else if(err_cpus) {
 				node_ptr->node_state &= NODE_STATE_FLAGS;
@@ -1037,8 +1043,6 @@ extern void specific_info_node(popup_info_t *popup_win)
 	hostlist_iterator_t host_itr = NULL;
 	int i = -1;
 	sview_search_info_t *search_info = spec_info->search_info;
-	bool drain_flag1 = false, comp_flag1 = false, no_resp_flag1 = false;
-	bool drain_flag2 = false, comp_flag2 = false, no_resp_flag2 = false;
 
 	if(!spec_info->display_widget)
 		setup_popup_info(popup_win, display_data_node, SORTID_CNT);
@@ -1116,16 +1120,9 @@ display_it:
 	}
 
 	i = -1;
-	if(search_info->int_data != NO_VAL) {
-		drain_flag1 = (search_info->int_data & NODE_STATE_DRAIN);
-		comp_flag1 = (search_info->int_data & NODE_STATE_COMPLETING);
-		no_resp_flag1 = (search_info->int_data
-				 & NODE_STATE_NO_RESPOND);
-	}
 
 	itr = list_iterator_create(info_list);
 	while ((sview_node_info_ptr = list_next(itr))) {
-		uint16_t tmp_16 = 0;
 		int found = 0;
 		char *host = NULL;
 		i++;
@@ -1135,66 +1132,59 @@ display_it:
 		case SEARCH_NODE_STATE:
 			if(search_info->int_data == NO_VAL)
 				continue;
-
-			drain_flag2 = (node_ptr->node_state
-				       & NODE_STATE_DRAIN);
-			comp_flag2 = (node_ptr->node_state
-				      & NODE_STATE_COMPLETING);
-			no_resp_flag2 = (node_ptr->node_state
-					 & NODE_STATE_NO_RESPOND);
-
-			if(drain_flag1 && drain_flag2)
-				break;
-			else if(comp_flag1 && comp_flag2)
-				break;
-			else if(no_resp_flag1 && no_resp_flag2)
-				break;
-
-			if(node_ptr->node_state != search_info->int_data) {
-				if((search_info->int_data & NODE_STATE_BASE)
-				   == NODE_STATE_ALLOCATED) {
+			else if(search_info->int_data != node_ptr->node_state) {
+				if(IS_NODE_MIXED(node_ptr)) {
+					uint16_t alloc_cnt = 0, err_cnt = 0;
+					uint16_t idle_cnt = node_ptr->cpus;
 					select_g_select_nodeinfo_get(
 						node_ptr->select_nodeinfo,
 						SELECT_NODEDATA_SUBCNT,
 						NODE_STATE_ALLOCATED,
-						&tmp_16);
-					if(tmp_16)
-						break;
-				}
-				if((search_info->int_data & NODE_STATE_BASE)
-				   == NODE_STATE_ERROR) {
+						&alloc_cnt);
 					select_g_select_nodeinfo_get(
 						node_ptr->select_nodeinfo,
 						SELECT_NODEDATA_SUBCNT,
 						NODE_STATE_ERROR,
-						&tmp_16);
-					if(tmp_16)
-						break;
+						&err_cnt);
+					idle_cnt -= (alloc_cnt + err_cnt);
+					if((search_info->int_data
+					    & NODE_STATE_BASE)
+					   == NODE_STATE_ALLOCATED) {
+						if(alloc_cnt)
+							break;
+					} else if((search_info->int_data
+						   & NODE_STATE_BASE)
+						  == NODE_STATE_ERROR) {
+						if(err_cnt)
+							break;
+					} else if((search_info->int_data
+						   & NODE_STATE_BASE)
+						  == NODE_STATE_IDLE) {
+						if(idle_cnt)
+							break;
+					}
 				}
 				continue;
 			}
 			break;
-
 		case SEARCH_NODE_NAME:
 		default:
-			/* Nothing to do here since we just are
-			 * looking for the node name */
-			break;
-		}
-		if(!search_info->gchar_data)
-			continue;
-		while((host = hostlist_next(host_itr))) {
-			if(!strcmp(host, node_ptr->name)) {
+			if(!search_info->gchar_data)
+				continue;
+			while((host = hostlist_next(host_itr))) {
+				if(!strcmp(host, node_ptr->name)) {
+					free(host);
+					found = 1;
+					break;
+				}
 				free(host);
-				found = 1;
-				break;
 			}
-			free(host);
-		}
-		hostlist_iterator_reset(host_itr);
+			hostlist_iterator_reset(host_itr);
 
-		if(!found)
-			continue;
+			if(!found)
+				continue;
+			break;
+		}
 		list_push(send_info_list, sview_node_info_ptr);
 		change_grid_color(popup_win->grid_button_list,
 				  i, i, 0, true, 0);
diff --git a/src/sview/part_info.c b/src/sview/part_info.c
index b86e70e998cf7069b992e2f60c89e6b1fba94e75..5833191d2d50f1f7a038f17f4351e1706c5839e8 100644
--- a/src/sview/part_info.c
+++ b/src/sview/part_info.c
@@ -2034,8 +2034,32 @@ display_it:
 			hostset_destroy(hostset);
 			break;
 		case PART_PAGE:
+			switch(spec_info->search_info->search_type) {
+			case SEARCH_PARTITION_NAME:
+				if(!spec_info->search_info->gchar_data)
+					continue;
+
+				if(strcmp(part_ptr->name,
+					  spec_info->search_info->gchar_data))
+					continue;
+				break;
+			case SEARCH_PARTITION_STATE:
+				if(spec_info->search_info->int_data == NO_VAL)
+					continue;
+				if(part_ptr->state_up !=
+				   spec_info->search_info->int_data)
+					continue;
+				break;
+			default:
+				continue;
+				break;
+			}
+			break;
 		case BLOCK_PAGE:
 		case JOB_PAGE:
+			if(!spec_info->search_info->gchar_data)
+				continue;
+
 			if(strcmp(part_ptr->name,
 				  spec_info->search_info->gchar_data))
 				continue;
diff --git a/src/sview/popups.c b/src/sview/popups.c
index 7034831c66870e4fbe3555272210c5de802c2e69..a90a413b8f578b9dd61d0c7a83aa01a8a7013eb9 100644
--- a/src/sview/popups.c
+++ b/src/sview/popups.c
@@ -439,13 +439,16 @@ extern void create_search_popup(GtkAction *action, gpointer user_data)
 #endif
 	} else if(!strcmp(name, "node_state")) {
 		display_data_t pulldown_display_data[] = {
-			{G_TYPE_NONE, NODE_STATE_UNKNOWN, "Down", TRUE, -1},
-			{G_TYPE_NONE, NODE_STATE_NO_RESPOND, "No Response",
-			 TRUE, -1},
-			{G_TYPE_NONE, NODE_STATE_DRAIN, "Drained", TRUE, -1},
+			{G_TYPE_NONE, NODE_STATE_DOWN, "Down", TRUE, -1},
+			{G_TYPE_NONE, NODE_STATE_ALLOCATED | NODE_STATE_DRAIN,
+			 "Draining", TRUE, -1},
+			{G_TYPE_NONE, NODE_STATE_IDLE | NODE_STATE_DRAIN,
+			 "Drained", TRUE, -1},
 			{G_TYPE_NONE, NODE_STATE_IDLE, "Idle", TRUE, -1},
 			{G_TYPE_NONE, NODE_STATE_ALLOCATED, "Allocated",
 			 TRUE, -1},
+			{G_TYPE_NONE, NODE_STATE_ERROR, "Error", TRUE, -1},
+			{G_TYPE_NONE, NODE_STATE_MIXED, "Mixed", TRUE, -1},
 			{G_TYPE_NONE, NODE_STATE_COMPLETING, "Completing",
 			 TRUE, -1},
 			{G_TYPE_NONE, NODE_STATE_UNKNOWN, "Unknown", TRUE, -1},
diff --git a/testsuite/expect/test1.60 b/testsuite/expect/test1.60
index d409a4d8524903fca27caa757253207b72efcf40..c99600a6302d52f4fb8b541e455b6d3a521b4718 100755
--- a/testsuite/expect/test1.60
+++ b/testsuite/expect/test1.60
@@ -169,7 +169,7 @@ if {$job_id == 0} {
 	exit 1
 }
 if {[test_front_end] != 0} {
-	send_user "\nWARNING: Additional tests are incompatable with front-end systems\n"
+	send_user "\nWARNING: Additional tests are incompatible with front-end systems\n"
 	exit $exit_code
 }
 
diff --git a/testsuite/expect/test1.91 b/testsuite/expect/test1.91
index c89c66e1efdcfaaa5b617cfab9b1cb6876d6faf1..2f6dfd0f94b618a165158368be8afe0826df3302 100755
--- a/testsuite/expect/test1.91
+++ b/testsuite/expect/test1.91
@@ -110,14 +110,14 @@ set num_threads 0
 log_user 0
 spawn $scontrol show node $node_name
 expect {
-	-re "Sockets=($number)" {
-		set num_sockets $expect_out(1,string)
-		exp_continue
-	}
 	-re "CoresPerSocket=($number)" {
 	   	set num_cores $expect_out(1,string)
 		exp_continue
 	}
+	-re "Sockets=($number)" {
+		set num_sockets $expect_out(1,string)
+		exp_continue
+	}
 	-re "ThreadsPerCore=($number)" {
 	   	set num_threads $expect_out(1,string)
 		exp_continue
diff --git a/testsuite/expect/test14.6 b/testsuite/expect/test14.6
index b155be9e74ea0e624cab5a5026fd05522f9c12d0..f7ceb0bb040867d2513a03ca3b056ac26f855271 100755
--- a/testsuite/expect/test14.6
+++ b/testsuite/expect/test14.6
@@ -47,7 +47,7 @@ if {[test_front_end] != 0} {
 	exit 0
 }
 if {[slurmd_user_root] == 0} {
-	send_user "\nWARNING: This test is incompatable with SlurmdUser != root\n"
+	send_user "\nWARNING: This test is incompatible with SlurmdUser != root\n"
 	exit 0
 }
 
diff --git a/testsuite/expect/test21.10 b/testsuite/expect/test21.10
index 0ccdd1bd8f87d9de1042713f4ad9cf662d8d4c5a..f5a21aee6fe8a83feb96232537495c19d638ae53 100755
--- a/testsuite/expect/test21.10
+++ b/testsuite/expect/test21.10
@@ -287,7 +287,7 @@ if {$aamatches != 1} {
 #
 # Use sacctmgr to list the test associations
 #
-set as_list_pid [spawn $sacctmgr list $acc $wa $nam=$nm1 format=$nams,$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw]
+set as_list_pid [spawn $sacctmgr list $acc $wa $nam=$nm1 format=$nam,$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw]
 expect {
 	-re "$nm1,$fs1,$gm1,$gc1,$gj1,$gs1,$gn1,$gw1,$mm1,$mc1,$mj1,$ms1,$mn1,$mw1" {
 		exp_continue
diff --git a/testsuite/expect/test7.5 b/testsuite/expect/test7.5
index bc2edf3b7f573d887d1503f6f73a53f860051b33..7abd866337213312de6e98625f38bf852e3c2a2e 100755
--- a/testsuite/expect/test7.5
+++ b/testsuite/expect/test7.5
@@ -46,7 +46,7 @@ set usr2cnt     0
 print_header $test_id
 
 if { [test_xcpu] } {
-	send_user "\nWARNING: This test is incompatable with XCPU systems\n"
+	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
 	exit $exit_code
 }