diff --git a/META b/META
index 12961437fcf9bbbf3f423a14bc95af332a936f7a..8c3183f34ca8bc5e991bc5b9162eb2a227a69b82 100644
--- a/META
+++ b/META
@@ -3,9 +3,9 @@
   Api_revision:  0
   Major:         2
   Meta:          1
-  Micro:         1
+  Micro:         2
   Minor:         1
   Name:          slurm
   Release:       1
   Release_tags:  dist
-  Version:       2.1.1
+  Version:       2.1.2
diff --git a/NEWS b/NEWS
index 6ab1d7b2a0974b6b32e6520acee21da643b575bd..65a65b68f4b33fccb28d04d2a00880d3c76b5a81 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,35 @@
 This file describes changes in recent versions of SLURM. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in SLURM 2.1.2
+=============================
+ -- Added nodelist to sview for jobs on non-bluegene systems
+ -- Correction in value of batch job environment variable SLURM_TASKS_PER_NODE
+    under some conditions.
+ -- When a node that is already drained or down fails silently, the reason
+    recorded for draining the node is not changed.
+ -- Srun will ignore the SLURM_NNODES environment variable and use the count
+    of currently allocated nodes if that count changes during the job's
+    lifetime (e.g. a job allocation uses the --no-kill option and a node
+    goes DOWN; previously the job step would always fail).
+ -- Made it so sacctmgr can't add a blank user or account.  The MySQL plugin
+    will also reject such requests.
+ -- Revert libpmi.so version for compatibility with SLURM version 2.0 and
+    earlier to avoid forcing applications using a specific libpmi.so version to
+    rebuild unnecessarily (revert from libpmi.so.21.0.0 to libpmi.so.0.0.0).
+ -- Restore support for a pending job's constraints (required node features) 
+    when slurmctld is restarted (internal structure needed to be rebuilt).
+ -- Removed checkpoint_blcr.so from the plugin rpm in the slurm.spec since
+    it is also in the blcr rpm.
+ -- Fixed issue in sview where you were unable to edit the count
+    of jobs to share resources.
+ -- BLUEGENE - Fixed issue where tasks on steps weren't being displayed
+    correctly with scontrol and sview.
+ -- BLUEGENE - Fixed wiki2 plugin to report the correct task count for
+    pending jobs.
+ -- BLUEGENE - Added /etc/ld.so.conf.d/slurm.conf to point to the
+    directory holding libsched_if64.so when building rpms.
+ -- Adjust get_wckeys call in slurmdbd to allow operators to list wckeys.
+
 * Changes in SLURM 2.1.1
 =============================
  -- Fix for case sensitive databases when a slurmctld has a mixed case
@@ -4698,7 +4727,7 @@ documents those changes that are of interest to users and admins.
 * Changes in SLURM 0.2.9
 ========================
  -- Fixes for reported problems:
-   - Argument to srun `-n' option was taken as octal if preceeded with a `0'.
+   - Argument to srun `-n' option was taken as octal if preceded with a `0'.
  -- New format for Elan hosts config file (/etc/elanhosts. See README)
  -- Various fixes for managing COMPLETING jobs.
  -- Support for passing options to squeue via environment variables 
@@ -4799,4 +4828,4 @@ documents those changes that are of interest to users and admins.
  -- Change directory to /tmp in slurmd if daemonizing.
  -- Logfiles are reopened on reconfigure.
  
-$Id: NEWS 19293 2010-01-21 01:45:33Z da $
+$Id: NEWS 19384 2010-02-02 22:11:39Z da $
diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml
index 80ce63554685efd4dc9693af7e8fa75a5cc09789..4c01186639e25e6c4b3be2b40f6969a5e2bbaf05 100644
--- a/doc/html/accounting.shtml
+++ b/doc/html/accounting.shtml
@@ -87,6 +87,19 @@ SlurmDBD is installed. Install the <i>slurmdbd</i> and
 <i>slurm-plugins</i> RPMs on the computer when SlurmDBD
 is to execute.</p>
 
+<p>If SlurmDBD is configured for use but not responding then <i>slurmctld</i>
+will utilize an internal cache until SlurmDBD is returned to service.
+The cached data is written by <i>slurmctld</i> to local storage upon shutdown
+and recovered at startup.
+If SlurmDBD is not available when <i>slurmctld</i> starts, a cache of
+valid bank accounts, user limits, etc. based upon their state when the
+daemons were last communicating will be used.
+Note that SlurmDBD must be responding when <i>slurmctld</i> is first started
+since no cache of this critical data will be available.
+Job and step accounting records generated by <i>slurmctld</i> will be
+written to a cache as needed and transferred to SlurmDBD when it is
+returned to service.</p>
+
 <h2>Infrastructure</h2>
 
 <p>With the SlurmDBD, we are able to collect data from multiple
@@ -753,7 +766,7 @@ as deleted.
 If an entity has existed for less than 1 day, the entity will be removed
 completely. This is meant to clean up after typographic errors.</p>
 
-<p style="text-align: center;">Last modified 2 March 2009</p>
+<p style="text-align: center;">Last modified 25 January 2010</p>
 
 <!--#include virtual="footer.txt"-->
 
diff --git a/doc/html/checkpoint_blcr.shtml b/doc/html/checkpoint_blcr.shtml
index 5d9a64133dd6987286aa17b8d2fb441c80ea9990..31281bbcbaeadafd88d8db19cc700384a6bfc9ef 100644
--- a/doc/html/checkpoint_blcr.shtml
+++ b/doc/html/checkpoint_blcr.shtml
@@ -181,12 +181,18 @@ same set of nodes from which it was previously checkpointed.</li>
 
 <p>The following SLURM configuration parameter has been added:</p>
 <ul>
-<li><b>JobCheckpointDir</b> specified the default directory for storing
-or reading job checkpoint files</li>
+<li><b>JobCheckpointDir</b>
+Specifies the default directory for storing or reading job checkpoint
+information. The data stored here is only a few thousand bytes per job
+and includes information needed to resubmit the job request, not the job's
+memory image. The directory must be readable and writable by
+<b>SlurmUser</b>, but not writable by regular users. The job memory images
+may be in a different location as specified by the <b>--checkpoint-dir</b>
+option at job submit time or scontrol's <b>ImageDir</b> option.</li>
 </ul>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 7 January 2010</p>
+<p style="text-align:center;">Last modified 26 January 2010</p>
 
 <!--#include virtual="footer.txt"-->
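
A hypothetical illustration of the split described above (paths are
illustrative; /var/slurm/checkpoint was the previously documented default):

    # slurm.conf: small job-resubmit records, writable by SlurmUser only
    JobCheckpointDir=/var/slurm/checkpoint

    # submit time: place the much larger BLCR memory images elsewhere
    srun --checkpoint-dir=/scratch/ckpt ./a.out
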
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 4536bab0e0a0d662e2a32e4d11b2f055a02f763b..c89852dc1d2e1c84c67a0d66d33e06cfaaf365b4 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -658,8 +658,9 @@ ignored if \fISchedulerType=sched/wiki\fR or
 
 .TP
 \fB\-\-ntasks\-per\-core\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each core.
-Similar to \fB\-\-ntasks\-per\-node\fR except at the core level
+Request that \fIntasks\fR be invoked on each core.
+Meant to be used with the \fB\-\-ntasks\fR option.
+Related to \fB\-\-ntasks\-per\-node\fR except at the core level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific core unless \fB\-\-cpu_bind=none\fR
 is specified.
@@ -669,8 +670,9 @@ NOTE: This option is not supported unless
 
 .TP
 \fB\-\-ntasks\-per\-socket\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each socket.
-Similar to \fB\-\-ntasks\-per\-node\fR except at the socket level
+Request that \fIntasks\fR be invoked on each socket.
+Meant to be used with the \fB\-\-ntasks\fR option.
+Related to \fB\-\-ntasks\-per\-node\fR except at the socket level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
 is specified.
@@ -680,11 +682,12 @@ NOTE: This option is not supported unless
 
 .TP
 \fB\-\-ntasks\-per\-node\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each node.
-This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
+Request that \fIntasks\fR be invoked on each node.
+Meant to be used with the \fB\-\-nodes\fR option.
+This is related to \fB\-\-cpus\-per\-task\fR=\fIncpus\fR,
 but does not require knowledge of the actual number of cpus on
 each node.  In some cases, it is more convenient to be able to
-request that no more than a specific number of ntasks be invoked
+request that no more than a specific number of tasks be invoked
 on each node.  Examples of this include submitting
 a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
 assigned to each node while allowing the OpenMP portion to utilize
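
The hybrid MPI/OpenMP case cited in the hunk above might be requested as
follows (node count and launch command are illustrative only):

    # one MPI task per node on four nodes; OpenMP threads may use the
    # remaining CPUs of each node
    salloc --nodes=4 --ntasks-per-node=1 mpirun ./hybrid_app
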
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 96e3172cf310bd9970ada91ec2ada377a8a3bca9..0655702318f2c3f1f67d25a79cd0e286e95d2cf0 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -697,8 +697,9 @@ behavior on the cluster.
 
 .TP
 \fB\-\-ntasks\-per\-core\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each core.
-Similar to \fB\-\-ntasks\-per\-node\fR except at the core level
+Request that \fIntasks\fR be invoked on each core.
+Meant to be used with the \fB\-\-ntasks\fR option.
+Related to \fB\-\-ntasks\-per\-node\fR except at the core level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific core unless \fB\-\-cpu_bind=none\fR
 is specified.
@@ -708,8 +709,9 @@ NOTE: This option is not supported unless
 
 .TP
 \fB\-\-ntasks\-per\-socket\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each socket.
-Similar to \fB\-\-ntasks\-per\-node\fR except at the socket level
+Request that \fIntasks\fR be invoked on each socket.
+Meant to be used with the \fB\-\-ntasks\fR option.
+Related to \fB\-\-ntasks\-per\-node\fR except at the socket level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
 is specified.
@@ -719,11 +721,12 @@ NOTE: This option is not supported unless
 
 .TP
 \fB\-\-ntasks\-per\-node\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each node.
-This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
+Request that \fIntasks\fR be invoked on each node.
+Meant to be used with the \fB\-\-nodes\fR option.
+This is related to \fB\-\-cpus\-per\-task\fR=\fIncpus\fR,
 but does not require knowledge of the actual number of cpus on
 each node.  In some cases, it is more convenient to be able to
-request that no more than a specific number of ntasks be invoked
+request that no more than a specific number of tasks be invoked
 on each node.  Examples of this include submitting
 a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
 assigned to each node while allowing the OpenMP portion to utilize
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index d04dcdfc40f07c8126dc30e69c16619b9e5da0e1..2cf1b8942082275bb4a79af1d72536581647e90c 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -603,6 +603,15 @@ Update slurm.conf with any changes meant to be persistent.
 Identify the user groups which may use this partition.
 Multiple groups may be specified in a comma separated list.
 To permit all groups to use the partition specify "AllowGroups=ALL".
+
+.TP
+\fIAllocNodes\fP=<name>
+Comma separated list of nodes from which users can execute jobs in the
+partition.
+Node names may be specified using the node range expression syntax
+described above.
+The default value is "ALL".
+
 .TP
 \fIDefault\fP=<yes|no>
 Specify if this partition is to be used by jobs which do not explicitly
@@ -612,6 +621,12 @@ In order to change the default partition of a running system,
 use the scontrol update command and set Default=yes for the partition
 that you want to become the new default.
 
+.TP
+\fIDefaultTime\fP=<time>
+Run time limit used for jobs that don't specify a value. If not set
+then MaxTime will be used.
+Format is the same as for MaxTime.
+
 .TP
 \fIHidden\fP=<yes|no>
 Specify if the partition and its jobs should be hidden from view.
@@ -650,6 +665,15 @@ Specify a blank data value to remove all nodes from a partition: "Nodes=".
 \fIPartitionName\fP=<name>
 Identify the partition to be updated. This specification is required.
 
+.TP
+\fIPriority\fP=<count>
+Jobs submitted to a higher priority partition will be dispatched
+before pending jobs in lower priority partitions and, if possible,
+they will preempt running jobs from lower priority partitions.
+Note that a partition's priority takes precedence over a job's
+priority.
+The value may not exceed 65533.
+
 .TP
 \fIRootOnly\fP=<yes|no>
 Specify if only allocation requests initiated by user root will be satisfied.
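
Taken together, the partition attributes added above can be set with the
update command this page already documents (all values are illustrative):

    scontrol update PartitionName=batch Priority=10 DefaultTime=60 \
             AllocNodes=login[1-2]
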
diff --git a/doc/man/man1/sinfo.1 b/doc/man/man1/sinfo.1
index d0ea54f8ef911eb84fab5dddacd26cb8d83e4223..7cf092586e8f3e19621ef1fd5b8b00ac106d70f9 100644
--- a/doc/man/man1/sinfo.1
+++ b/doc/man/man1/sinfo.1
@@ -240,10 +240,10 @@ This is ignored if the \fB\-\-format\fR option is specified.
 Specification of the order in which records should be reported.
 This uses the same field specifciation as the <output_format>.
 Multiple sorts may be performed by listing multiple sort fields
-separated by commas.  The field specifications may be preceeded
+separated by commas.  The field specifications may be preceded
 by "+" or "\-" for assending (default) and desending order
 respectively.  The partition field specification, "P", may be
-preceeded by a "#" to report partitions in the same order that
+preceded by a "#" to report partitions in the same order that
 they appear in SLURM's  configuration file, \fBslurm.conf\fR.
 For example, a sort value of "+P,\-m" requests that records
 be printed in order of increasing partition name and within a
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 0b0123c418cd3befb8d0aaffb3a6f296c78263ac..b89a31a0de5ce3be85d05786056118ba4bbd8aa2 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -289,7 +289,7 @@ Specification of the order in which records should be reported.
 This uses the same field specifciation as the <output_format>.
 Multiple sorts may be performed by listing multiple sort fields
 separated by commas.
-The field specifications may be preceeded by "+" or "\-" for
+The field specifications may be preceded by "+" or "\-" for
 ascending (default) and descending order respectively.
 For example, a sort value of "P,U" will sort the
 records by partition name then by user id.
@@ -366,7 +366,7 @@ The job's time limit exceeds it's partition's current time limit.
 One or more higher priority jobs exist for this partition.
 .TP
 \fBResources\fR
-The job is waiting for resources to become availble.
+The job is waiting for resources to become available.
 .TP
 \fBNodeDown\fR
 A node required by the job is down.
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 8de02bb3fba444060ba568e5b687714204af0a15..c0fb11a5f4f6e050d0ef589734a31338154f605b 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -454,17 +454,23 @@ SLURM_JOB_ID environment variable was set.
 
 .TP
 \fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR
-Terminate a job if any task exits with a non\-zero exit code.
+Immediately terminate a job if any task exits with a non\-zero exit code.
+Note: The \fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR option takes precedence
+over \fB\-W\fR, \fB\-\-wait\fR to terminate the job immediately if a task
+exits with a non\-zero exit code.
 
 .TP
 \fB\-k\fR, \fB\-\-no\-kill\fR
 Do not automatically terminate a job of one of the nodes it has been
 allocated fails.  This option is only recognized on a job allocation,
 not for the submission of individual job steps.
-The job will assume all responsibilities for fault\-tolerance. The
-active job step (MPI job) will almost certainly suffer a fatal error,
-but subsequent job steps may be run if this option is specified. The
-default action is to terminate job upon node failure.
+The job will assume all responsibilities for fault\-tolerance. 
+Tasks launched using this option will not be considered terminated
+(e.g. \fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR and
+\fB\-W\fR, \fB\-\-wait\fR options will have no effect upon the job step).
+The active job step (MPI job) will likely suffer a fatal error,
+but subsequent job steps may be run if this option is specified. 
+The default action is to terminate the job upon node failure.
 
 .TP
 \fB\-l\fR, \fB\-\-label\fR
@@ -754,8 +760,9 @@ ignored if \fISchedulerType=sched/wiki\fR or
 
 .TP
 \fB\-\-ntasks\-per\-core\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each core.
-Similar to \fB\-\-ntasks\-per\-node\fR except at the core level
+Request that \fIntasks\fR be invoked on each core.
+Meant to be used with the \fB\-\-ntasks\fR option.
+Related to \fB\-\-ntasks\-per\-node\fR except at the core level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific core unless \fB\-\-cpu_bind=none\fR
 is specified.
@@ -765,8 +772,9 @@ NOTE: This option is not supported unless
 
 .TP
 \fB\-\-ntasks\-per\-socket\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each socket.
-Similar to \fB\-\-ntasks\-per\-node\fR except at the socket level
+Request that \fIntasks\fR be invoked on each socket.
+Meant to be used with the \fB\-\-ntasks\fR option.
+Related to \fB\-\-ntasks\-per\-node\fR except at the socket level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
 is specified.
@@ -776,11 +784,12 @@ NOTE: This option is not supported unless
 
 .TP
 \fB\-\-ntasks\-per\-node\fR=<\fIntasks\fR>
-Request that no more than \fIntasks\fR be invoked on each node.
-This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
+Request that \fIntasks\fR be invoked on each node.
+Meant to be used with the \fB\-\-nodes\fR option.
+This is related to \fB\-\-cpus\-per\-task\fR=\fIncpus\fR,
 but does not require knowledge of the actual number of cpus on
 each node.  In some cases, it is more convenient to be able to
-request that no more than a specific number of ntasks be invoked
+request that no more than a specific number of tasks be invoked
 on each node.  Examples of this include submitting
 a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
 assigned to each node while allowing the OpenMP portion to utilize
@@ -1044,6 +1053,9 @@ be issued after 60 seconds). The default value is set by the WaitTime
 parameter in the slurm configuration file (see \fBslurm.conf(5)\fR). This
 option can be useful to insure that a job is terminated in a timely fashion
 in the event that one or more tasks terminate prematurely.
+Note: The \fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR option takes precedence
+over \fB\-W\fR, \fB\-\-wait\fR to terminate the job immediately if a task 
+exits with a non\-zero exit code.
 
 .TP
 \fB\-w\fR, \fB\-\-nodelist\fR=<\fIhost1,host2,...\fR or \fIfilename\fR>
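
A hypothetical command illustrating the precedence note added above:

    # -K kills the step at once if any task exits non-zero; otherwise
    # -W gives the remaining tasks 60 seconds after the first task exits
    srun -n16 -K -W 60 ./a.out
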
diff --git a/doc/man/man1/sview.1 b/doc/man/man1/sview.1
index fa63859e335f497a132fd666e4a1c66ff79ae5d7..905dc431855ffb136450c667a5f0294107175d7f 100644
--- a/doc/man/man1/sview.1
+++ b/doc/man/man1/sview.1
@@ -20,7 +20,7 @@ Left\-click on the tab of the display you would like to see.
 Right\-click on the tab in order to control which fields will be displayed.
 .LP
 Within the display window, left\-click on the header to control the sort
-order of entries (e.g. increasing or decreasing) in the diplay.
+order of entries (e.g. increasing or decreasing) in the display.
 You can also left\-click and drag the headers to move them right or left in the display.
 If a JobID has an arrow next to it, click on that arrow to display or hide
 information about that job's steps.
diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3
index 8f227ae1fa5c07698a05cb7d32d58df755f969ee..f7ec6db6fc935426b66a5e0776b67490ee5fa8c5 100644
--- a/doc/man/man3/slurm_allocate_resources.3
+++ b/doc/man/man3/slurm_allocate_resources.3
@@ -152,24 +152,24 @@ queued until the partition's limits are changed.
 Always release the response message when no longer required using
 the function \fBslurm_free_resource_allocation_response_msg\fR.  This
 function only makes the request once.  If the allocation is not
-avaliable immediately the node_cnt variable in the resp will be 0.  If
-you want a function that will block until either an error is recieved
+available immediately the node_cnt variable in the resp will be 0.  If
+you want a function that will block until either an error is received
 or an allocation is granted you can use the
 \fIslurm_allocate_resources_blocking\fP function described below.
 .LP
 \fBslurm_allocate_resources_blocking\fR Request a resource allocation for a
 job.  This call will block until the allocation is granted, an error
 occurs, or the specified timeout limit is reached.  The \fIpending_callback\fP
-parameter will be called if the allocation is not avaliable
+parameter will be called if the allocation is not available
 immediately and the immedite flag is not set in the request.  This can
 be used to get the jobid of the job while waiting for the allocation
-to become avaliable.  On failure NULL is returned and errno is set.
+to become available.  On failure NULL is returned and errno is set.
 .LP
 \fBslurm_allocation_msg_thr_create\fR Startup a message handler
 talking with the controller dealing with messages from the controller
 during an allocation. Callback functions are declared in the
 \fIcallbacks\fP parameter and will be called when a corresponding
-message is recieved from the controller.  This message thread is
+message is received from the controller.  This message thread is
 needed to receive messages from the controller about node failure in
 an allocation and other important messages.  Although technically not
 required, it could be very helpful to inform about problems with the
diff --git a/doc/man/man5/bluegene.conf.5 b/doc/man/man5/bluegene.conf.5
index 2bf1634c1e0a6321069569d53fe0a4107e6a3c33..1a016c2058be87ae0b24e4888de0d2181140469d 100644
--- a/doc/man/man5/bluegene.conf.5
+++ b/doc/man/man5/bluegene.conf.5
@@ -9,7 +9,7 @@ The file location can be modified at system build time using the
 DEFAULT_SLURM_CONF parameter. The file will always be located in the
 same directory as the \fBslurm.conf\fP file.
 .LP
-Paramter names are case insensitive.
+Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index 65c24aedcf079b99ef049b8e46b960635201a632..9cde65cddde8d310cd8ed664d9696b83c9298f15 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -42,6 +42,8 @@ in the \fIslurm.conf\fR file, such as log files and job accounting files,
 may need to be created/owned by the "SlurmUser" uid to be successfully
 accessed.  Use the "chown" and "chmod" commands to set the ownership
 and permissions appropriately.
+See the section \fBFILE AND DIRECTORY PERMISSIONS\fR for information 
+about the various files and directories used by SLURM.
 
 .SH "PARAMETERS"
 .LP
@@ -522,8 +524,13 @@ information only on job termination (reducing SLURM interference with the job).
 
 .TP
 \fBJobCheckpointDir\fR
-Set the default directory used to store job checkpoint files.
-The default value is "/var/slurm/checkpoint".
+Specifies the default directory for storing or reading job checkpoint
+information. The data stored here is only a few thousand bytes per job
+and includes information needed to resubmit the job request, not the job's
+memory image. The directory must be readable and writable by
+\fBSlurmUser\fR, but not writable by regular users. The job memory images
+may be in a different location as specified by the \fB\-\-checkpoint\-dir\fR
+option at job submit time or scontrol's \fBImageDir\fR option.
 
 .TP
 \fBJobCompHost\fR
@@ -2123,6 +2130,12 @@ specification will utilize this partition.
 Possible values are "YES" and "NO".
 The default value is "NO".
 
+.TP
+\fBDefaultTime\fR
+Run time limit used for jobs that don't specify a value. If not set
+then MaxTime will be used.
+Format is the same as for MaxTime.
+
 .TP
 \fBDisableRootJobs\fR
 If set to "YES" then user root will be prevented from running any jobs
@@ -2156,12 +2169,6 @@ Time resolution is one minute and second values are rounded up to
 the next minute.
 This limit does not apply to jobs executed by SlurmUser or user root.
 
-.TP
-\fBDefaultTime\fR
-Run time limit used for jobs that don't specify a value. If not set
-then MaxTime will be used.
-Format is the same as for MaxTime.
-
 .TP
 \fBMinNodes\fR
 Minimum count of nodes (or base partitions for BlueGene systems) which
@@ -2509,6 +2516,130 @@ PartitionName=batch Nodes=dev[9\-17]  MinNodes=4
 .br
 PartitionName=long Nodes=dev[9\-17] MaxTime=120 AllowGroups=admin
 
+.SH "FILE AND DIRECTORY PERMISSIONS"
+There are three classes of files:
+Files used by \fBslurmctld\fR must be accessible by user \fBSlurmUser\fR
+and accessible by the primary and backup control machines.
+Files used by \fBslurmd\fR must be accessible by user root and 
+accessible from every compute node.
+A few files need to be accessible by normal users on all login and 
+compute nodes.
+While many files and directories are listed below, most of them will 
+not be used with most configurations.
+.TP
+\fBAccountingStorageLoc\fR
+If this specifies a file, it must be writable by user \fBSlurmUser\fR.
+The file must be accessible by the primary and backup control machines.
+It is recommended that the file be readable by all users from login and 
+compute nodes.
+.TP
+\fBEpilog\fR
+Must be executable by user root.
+It is recommended that the file be readable by all users.
+The file must exist on every compute node.
+.TP
+\fBEpilogSlurmctld\fR
+Must be executable by user \fBSlurmUser\fR.
+It is recommended that the file be readable by all users.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBHealthCheckProgram\fR
+Must be executable by user root.
+It is recommended that the file be readable by all users.
+The file must exist on every compute node.
+.TP
+\fBJobCheckpointDir\fR
+Must be writable by user \fBSlurmUser\fR and no other users.
+The directory must be accessible by the primary and backup control machines.
+.TP
+\fBJobCompLoc\fR
+If this specifies a file, it must be writable by user \fBSlurmUser\fR.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBJobCredentialPrivateKey\fR
+Must be readable only by user \fBSlurmUser\fR and writable by no other users.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBJobCredentialPublicCertificate\fR
+Must be readable by all users on all nodes.
+Must not be writable by regular users.
+.TP
+\fBMailProg\fR
+Must be executable by user \fBSlurmUser\fR.
+Must not be writable by regular users.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBProlog\fR
+Must be executable by user root.
+It is recommended that the file be readable by all users.
+The file must exist on every compute node.
+.TP
+\fBPrologSlurmctld\fR
+Must be executable by user \fBSlurmUser\fR.
+It is recommended that the file be readable by all users.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBResumeProgram\fR
+Must be executable by user \fBSlurmUser\fR.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBSallocDefaultCommand\fR
+Must be executable by all users.
+The file must exist on every login and compute node.
+.TP
+\fBslurm.conf\fR
+Must be readable by all users on all nodes.
+Must not be writable by regular users.
+.TP
+\fBSlurmctldLogFile\fR
+Must be writable by user \fBSlurmUser\fR.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBSlurmctldPidFile\fR
+Must be writable by user root. 
+Preferably writable and removable by \fBSlurmUser\fR.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBSlurmdLogFile\fR
+Must be writable by user root.
+A distinct file must exist on each compute node.
+.TP
+\fBSlurmdPidFile\fR
+Must be writable by user root.
+A distinct file must exist on each compute node.
+.TP
+\fBSlurmdSpoolDir\fR
+Must be writable by user root.
+A distinct directory must exist on each compute node.
+.TP
+\fBSrunEpilog\fR
+Must be executable by all users.
+The file must exist on every login and compute node.
+.TP
+\fBSrunProlog\fR
+Must be executable by all users.
+The file must exist on every login and compute node.
+.TP
+\fBStateSaveLocation\fR
+Must be writable by user \fBSlurmUser\fR.
+The directory must be accessible by the primary and backup control machines.
+.TP
+\fBSuspendProgram\fR
+Must be executable by user \fBSlurmUser\fR.
+The file must be accessible by the primary and backup control machines.
+.TP
+\fBTaskEpilog\fR
+Must be executable by all users.
+The file must exist on every compute node.
+.TP
+\fBTaskProlog\fR
+Must be executable by all users.
+The file must exist on every compute node.
+.TP
+\fBUnkillableStepProgram\fR
+Must be executable by user \fBSlurmUser\fR.
+The file must be accessible by the primary and backup control machines.
+
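As noted earlier in this page, ownership and permissions are set with the
chown and chmod commands; a hypothetical example for a SlurmUser-owned
directory (the user name and path are illustrative):

    chown slurm /var/spool/slurm.state   # assumes SlurmUser is "slurm"
    chmod 700 /var/spool/slurm.state     # writable by SlurmUser only
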
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
 Copyright (C) 2008\-2009 Lawrence Livermore National Security.
diff --git a/doc/man/man5/topology.conf.5 b/doc/man/man5/topology.conf.5
index ef0c6f1590a4dce5586b1b617b41644c0209da26..cde86ff527dd22280da6f073bd7bb5c6441faa97 100644
--- a/doc/man/man5/topology.conf.5
+++ b/doc/man/man5/topology.conf.5
@@ -10,7 +10,7 @@ The file location can be modified at system build time using the
 DEFAULT_SLURM_CONF parameter. The file will always be located in the
 same directory as the \fBslurm.conf\fP file.
 .LP
-Paramter names are case insensitive.
+Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
diff --git a/doc/man/man5/wiki.conf.5 b/doc/man/man5/wiki.conf.5
index 7248ba70f2cd3ed811b4574d6a8114782a1ace7e..a1b5e6b164f02741d943a5d3b9fc9966b112ec34 100644
--- a/doc/man/man5/wiki.conf.5
+++ b/doc/man/man5/wiki.conf.5
@@ -8,7 +8,7 @@ The file location can be modified at system build time using the
 DEFAULT_SLURM_CONF parameter. The file will always be located in the
 same directory as the \fBslurm.conf\fP file.
 .LP
-Paramter names are case insensitive.
+Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
 The size of each line in the file is limited to 1024 characters.
@@ -126,7 +126,7 @@ Not applicable to wiki plugin, only the wiki2 plugin.
 Job permitted to run directly under SLURM's control
 .TP
 \fBhold\fR
-Hold all incomming jobs until Moab or Maui tell them to run
+Hold all incoming jobs until Moab or Maui tells them to run
 .RE
 
 .SH "EXAMPLE"
diff --git a/slurm.spec b/slurm.spec
index 6597db3d4b1ea7db87d09ec004cf7fcfcff0fa43..a42bd0a409516baf6a787d2af81ad297295028df 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -1,4 +1,4 @@
-# $Id: slurm.spec 19205 2010-01-05 01:58:11Z da $
+# $Id: slurm.spec 19380 2010-02-02 18:31:55Z da $
 #
 # Note that this package is not relocatable
 
@@ -83,14 +83,14 @@
 %endif
 
 Name:    slurm
-Version: 2.1.1
+Version: 2.1.2
 Release: 1%{?dist}
 
 Summary: Simple Linux Utility for Resource Management
 
 License: GPL
 Group: System Environment/Base
-Source: slurm-2.1.1.tar.bz2
+Source: slurm-2.1.2.tar.bz2
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
 URL: https://computing.llnl.gov/linux/slurm/
 
@@ -350,7 +350,7 @@ Gives the ability for SLURM to use Berkeley Lab Checkpoint/Restart
 #############################################################################
 
 %prep
-%setup -n slurm-2.1.1
+%setup -n slurm-2.1.2
 
 %build
 %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \
@@ -423,6 +423,9 @@ install -D -m644 etc/federation.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/fed
 %if %{slurm_with bluegene}
 rm -f ${RPM_BUILD_ROOT}%{_bindir}/srun
 install -D -m644 etc/bluegene.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/bluegene.conf.example
+mkdir -p ${RPM_BUILD_ROOT}/etc/ld.so.conf.d
+echo "%{_libdir}/slurm" > ${RPM_BUILD_ROOT}/etc/ld.so.conf.d/slurm.conf
+chmod 644 ${RPM_BUILD_ROOT}/etc/ld.so.conf.d/slurm.conf
 %endif
 
 LIST=./aix.files
@@ -443,8 +446,6 @@ test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/accounting_storage_mysql.so &&
    echo %{_libdir}/slurm/accounting_storage_mysql.so >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/accounting_storage_pgsql.so &&
    echo %{_libdir}/slurm/accounting_storage_pgsql.so >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/checkpoint_blcr.so          &&
-   echo %{_libdir}/slurm/checkpoint_blcr.so          >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_openssl.so           &&
    echo %{_libdir}/slurm/crypto_openssl.so           >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/jobcomp_mysql.so            &&
@@ -534,6 +535,8 @@ rm -rf $RPM_BUILD_ROOT
 %defattr(-,root,root)
 %{_libdir}/slurm/select_bluegene.so
 %{_libdir}/slurm/libsched_if64.so
+%dir /etc/ld.so.conf.d
+/etc/ld.so.conf.d/slurm.conf
 %{_mandir}/man5/bluegene.*
 %{_sbindir}/slurm_epilog
 %{_sbindir}/slurm_prolog
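
For illustration, on a system where %{_libdir} expands to /usr/lib64 (an
assumption), the scriptlet added above generates:

    # /etc/ld.so.conf.d/slurm.conf, mode 0644
    /usr/lib64/slurm

so ldconfig adds the directory holding libsched_if64.so to the runtime
linker's search path.
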
diff --git a/src/api/Makefile.am b/src/api/Makefile.am
index fbfda5188efda7067d07cb47cd8f4d5be59923d9..44500c7ac6f9473f2aea2f0c4eaf0fe7d80092fa 100644
--- a/src/api/Makefile.am
+++ b/src/api/Makefile.am
@@ -44,6 +44,19 @@ current = $(SLURM_API_CURRENT)
 age     = $(SLURM_API_AGE)
 rev     = $(SLURM_API_REVISION)
 
+# libpmi version information
+#
+# The libpmi interface shouldn't be changing any time soon, so for SLURM's
+# libpmi only the library REVISION and AGE should change (and it is debatable
+# whether these parts of the .so version should change).
+#
+# REVISION is changed if the source of the library has changed so linkers 
+# prefer the new version.
+# AGE is changed if interfaces are added but compatibility is not broken.
+libpmi_current = 0
+libpmi_age     = 0
+libpmi_rev     = 0
+
 lib_LTLIBRARIES = libslurm.la libpmi.la
 
 # BUILT_SOURCES tells automake that when "make" is called, these targets
@@ -123,7 +136,7 @@ libslurm_la_LDFLAGS        = \
 libpmi_la_SOURCES = pmi.c
 libpmi_la_LIBADD = $(top_builddir)/src/api/libslurm.la
 libpmi_la_LDFLAGS = $(LIB_LDFLAGS) \
-        -version-info $(current):$(rev):$(age) \
+        -version-info $(libpmi_current):$(libpmi_rev):$(libpmi_age) \
         $(PMI_OTHER_FLAGS)
 
 # This was made so we chould export all symbols from libcommon
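
For reference, on Linux libtool maps -version-info current:revision:age to
the file name libNAME.so.(current-age).(age).(revision), so:

    -version-info 0:0:0   ->  libpmi.so.0.0.0   (this change)
    -version-info 21:0:0  ->  libpmi.so.21.0.0  (previous, per the NEWS entry)
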
diff --git a/src/api/Makefile.in b/src/api/Makefile.in
index be6c4939a8f0b9e450a657bafc5250274a9a38d1..6691ae1a40a16d008f19a1aecb9cdf9520f46c65 100644
--- a/src/api/Makefile.in
+++ b/src/api/Makefile.in
@@ -382,6 +382,19 @@ INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 current = $(SLURM_API_CURRENT)
 age = $(SLURM_API_AGE)
 rev = $(SLURM_API_REVISION)
+
+# libpmi version information
+#
+# The libpmi interface shouldn't be changing any time soon, so for SLURM's
+# libpmi only the library REVISION and AGE should change (and it is debatable
+# whether these parts of the .so version should change).
+#
+# REVISION is changed if the source of the library has changed so linkers 
+# prefer the new version.
+# AGE is changed if interfaces are added but compatibility is not broken.
+libpmi_current = 0
+libpmi_age = 0
+libpmi_rev = 0
 lib_LTLIBRARIES = libslurm.la libpmi.la
 
 # BUILT_SOURCES tells automake that when "make" is called, these targets
@@ -459,7 +472,7 @@ libslurm_la_LDFLAGS = \
 libpmi_la_SOURCES = pmi.c
 libpmi_la_LIBADD = $(top_builddir)/src/api/libslurm.la
 libpmi_la_LDFLAGS = $(LIB_LDFLAGS) \
-        -version-info $(current):$(rev):$(age) \
+        -version-info $(libpmi_current):$(libpmi_rev):$(libpmi_age) \
         $(PMI_OTHER_FLAGS)
 
 
diff --git a/src/api/allocate.c b/src/api/allocate.c
index 9cafa1eb12d15b43df61a521712d720befceb145..b5cfee87ff15e2b79ff0ab3b80e8cee9c8149104 100644
--- a/src/api/allocate.c
+++ b/src/api/allocate.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  allocate.c - allocate nodes for a job or step with supplied contraints
- *  $Id: allocate.c 19271 2010-01-19 21:00:56Z jette $
+ *  $Id: allocate.c 19303 2010-01-21 22:42:24Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
@@ -684,7 +684,7 @@ static void _destroy_allocation_response_socket(listen_t *listen)
 }
 
 /* process RPC from slurmctld
- * IN msg: message recieved
+ * IN msg: message received
  * OUT resp: resource allocation response message
  * RET 1 if resp is filled in, 0 otherwise */
 static int
diff --git a/src/api/job_info.c b/src/api/job_info.c
index 5a85d69013643be5c4ccf02979fbf3e2bd58406f..7afc8c30dca7d049ed7984cd4ed5725331e1f427 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -143,7 +143,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 	uint16_t exit_status = 0, term_sig = 0;
 	job_resources_t *job_resrcs = job_ptr->job_resrcs;
 	char *out = NULL;
-	uint32_t min_nodes, max_nodes;
+	uint32_t min_nodes, max_nodes = 0;
 
 #ifdef HAVE_BG
 	char select_buf[122];
@@ -346,7 +346,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 	if ((min_nodes == 0) || (min_nodes == NO_VAL)) {
 		min_nodes = job_ptr->num_nodes;
 		max_nodes = job_ptr->max_nodes;
-	} else
+	} else if(job_ptr->max_nodes)
 		max_nodes = min_nodes;
 #else
 	snprintf(tmp1, sizeof(tmp1), "%u", job_ptr->num_procs);
diff --git a/src/common/env.c b/src/common/env.c
index 82d16f9005d6f1be11fd6de00c77d0ad50e1b4ea..47244f2a3fdbf0c4e948b295dbe22bd7f2521847 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -1095,9 +1095,6 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", batch->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", num_nodes);
 	env_array_overwrite_fmt(dest, "SLURM_NODELIST", "%s", batch->nodes);
-	if(num_tasks)
-		env_array_overwrite_fmt(dest, "SLURM_NPROCS", "%u",
-					num_tasks);
 
 	if((batch->cpus_per_task != 0) &&
 	   (batch->cpus_per_task != (uint16_t) NO_VAL))
@@ -1109,13 +1106,18 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 					cpus_per_task);
 	}
 
+	if(num_tasks) {
+		env_array_overwrite_fmt(dest, "SLURM_NPROCS", "%u",
+					num_tasks);
+	} else {
+		num_tasks = num_cpus / cpus_per_task;
+	}
+
 	if((tmp = getenvp(*dest, "SLURM_ARBITRARY_NODELIST"))) {
 		task_dist = SLURM_DIST_ARBITRARY;
-		num_tasks = batch->nprocs;
 	} else {
 		tmp = batch->nodes;
 		task_dist = SLURM_DIST_BLOCK;
-		num_tasks = num_cpus / cpus_per_task;
 	}
 
 	if(!(step_layout = slurm_step_layout_create(tmp,
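
Condensed, the corrected ordering above computes a fallback task count for
both distribution branches (names as in the hunk):

    if (num_tasks) {
        /* explicit task count from the batch request */
        env_array_overwrite_fmt(dest, "SLURM_NPROCS", "%u", num_tasks);
    } else {
        /* fallback used when building the step layout, and hence
         * SLURM_TASKS_PER_NODE */
        num_tasks = num_cpus / cpus_per_task;
    }
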
diff --git a/src/common/forward.h b/src/common/forward.h
index 31e12f8a43350f4b99254191d88dc74f51b252b9..a7ad7d7cd79b2ec1f1205de3a610a59625787a48 100644
--- a/src/common/forward.h
+++ b/src/common/forward.h
@@ -125,7 +125,7 @@ extern void forward_wait(slurm_msg_t *msg);
  */
 /*********************************************************************
 Code taken from common/slurm_protocol_api.c
-//This function should only be used after a message is recieved.
+//This function should only be used after a message is received.
 
 // a call to slurm_receive_msg will fill in a ret_list
 	ret_list = slurm_receive_msg(fd, resp, timeout);
diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c
index 7321d78eb852734ac221411da50a7f3715feeb21..990e0a61763669c16fd16a038cd0e495ef0f83b1 100644
--- a/src/common/slurm_protocol_api.c
+++ b/src/common/slurm_protocol_api.c
@@ -2580,7 +2580,7 @@ slurm_fd slurm_listen_stream(slurm_addr * slurm_address)
 }
 
 /* slurm_accept_stream
- * accepts a incomming stream connection on a stream server slurm_fd
+ * accepts a incoming stream connection on a stream server slurm_fd
  * IN open_fd		- file descriptor to accept connection on
  * OUT slurm_address	- slurm_addr of the accepted connection
  * RET slurm_fd		- file descriptor of the accepted connection
diff --git a/src/common/slurm_protocol_api.h b/src/common/slurm_protocol_api.h
index 3258ec32909b2c7831efec19eda1c81f1815007a..f814059dc5596051364ed5ef2fc07ed5ef470aa1 100644
--- a/src/common/slurm_protocol_api.h
+++ b/src/common/slurm_protocol_api.h
@@ -731,7 +731,7 @@ int inline slurm_shutdown_msg_conn(slurm_fd open_fd);
 slurm_fd inline slurm_listen_stream(slurm_addr * slurm_address);
 
 /* slurm_accept_stream
- * accepts a incomming stream connection on a stream server slurm_fd
+ * accepts a incoming stream connection on a stream server slurm_fd
  * IN open_fd		- file descriptor to accept connection on
  * OUT slurm_address 	- slurm_addr of the accepted connection
  * RET slurm_fd		- file descriptor of the accepted connection
diff --git a/src/common/slurm_protocol_interface.h b/src/common/slurm_protocol_interface.h
index 5760a4e271019b30ef42c69e58fb0b6f6659b499..355a4b9c7bd73957c1ba7322eec40966d9a9b9ab 100644
--- a/src/common/slurm_protocol_interface.h
+++ b/src/common/slurm_protocol_interface.h
@@ -196,7 +196,7 @@ int _slurm_close_accepted_conn ( slurm_fd open_fd ) ;
 slurm_fd _slurm_listen_stream ( slurm_addr * slurm_address ) ;
 
 /* _slurm_accept_stream
- * accepts a incomming stream connection on a stream server slurm_fd
+ * accepts a incoming stream connection on a stream server slurm_fd
  * IN open_fd		- file descriptor to accept connection on
  * OUT slurm_address 	- slurm_addr of the accepted connection
  * RET slurm_fd		- file descriptor of the accepted connection
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index 7b7810f7250b69d4e39bcfbfb5aff84265cc3a05..bfe579fbd9ad56af7b51cc554a01c975e8a5c84e 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -3859,7 +3859,8 @@ extern int acct_storage_p_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(user_list);
 	while((object = list_next(itr))) {
-		if(!object->name || !object->default_acct) {
+		if(!object->name || !object->name[0]
+		   || !object->default_acct || !object->default_acct[0]) {
 			error("We need a user name and "
 			      "default acct to add.");
 			rc = SLURM_ERROR;
@@ -4000,7 +4001,11 @@ extern int acct_storage_p_add_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 	itr = list_iterator_create(user_cond->assoc_cond->user_list);
 	itr2 = list_iterator_create(acct_list);
 	while((user = list_next(itr))) {
+		if(!user[0])
+			continue;
 		while((acct = list_next(itr2))) {
+			if(!acct[0])
+				continue;
 			if(query)
 				xstrfmtcat(query, ", (%d, %d, \"%s\", \"%s\")",
 					   now, now, acct, user);
@@ -4083,8 +4088,9 @@ extern int acct_storage_p_add_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(acct_list);
 	while((object = list_next(itr))) {
-		if(!object->name || !object->description
-		   || !object->organization) {
+		if(!object->name || !object->name[0]
+		   || !object->description || !object->description[0]
+		   || !object->organization || !object->organization[0]) {
 			error("We need an account name, description, and "
 			      "organization to add. %s %s %s",
 			      object->name, object->description,
@@ -4201,7 +4207,7 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(cluster_list);
 	while((object = list_next(itr))) {
-		if(!object->name) {
+		if(!object->name || !object->name[0]) {
 			error("We need a cluster name to add.");
 			rc = SLURM_ERROR;
 			continue;
@@ -4380,7 +4386,8 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(association_list);
 	while((object = list_next(itr))) {
-		if(!object->cluster || !object->acct) {
+		if(!object->cluster || !object->cluster[0]
+		   || !object->acct || !object->acct[0]) {
 			error("We need a association cluster and "
 			      "acct to add one.");
 			rc = SLURM_ERROR;
@@ -4807,7 +4814,7 @@ extern int acct_storage_p_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(qos_list);
 	while((object = list_next(itr))) {
-		if(!object->name) {
+		if(!object->name || !object->name[0]) {
 			error("We need a qos name to add.");
 			rc = SLURM_ERROR;
 			continue;
@@ -4915,7 +4922,8 @@ extern int acct_storage_p_add_wckeys(mysql_conn_t *mysql_conn, uint32_t uid,
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(wckey_list);
 	while((object = list_next(itr))) {
-		if(!object->cluster || !object->user) {
+		if(!object->cluster || !object->cluster[0]
+		   || !object->user || !object->user[0]) {
 			error("We need a wckey name, cluster, "
 			      "and user to add.");
 			rc = SLURM_ERROR;
@@ -5024,7 +5032,7 @@ extern int acct_storage_p_add_reservation(mysql_conn_t *mysql_conn,
 		error("We need a start time to edit a reservation.");
 		return SLURM_ERROR;
 	}
-	if(!resv->cluster) {
+	if(!resv->cluster || !resv->cluster[0]) {
 		error("We need a cluster name to edit a reservation.");
 		return SLURM_ERROR;
 	}
@@ -6209,7 +6217,7 @@ extern int acct_storage_p_modify_reservation(mysql_conn_t *mysql_conn,
 		error("We need a start time to edit a reservation.");
 		return SLURM_ERROR;
 	}
-	if(!resv->cluster) {
+	if(!resv->cluster || !resv->cluster[0]) {
 		error("We need a cluster name to edit a reservation.");
 		return SLURM_ERROR;
 	}
@@ -6400,6 +6408,8 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(user_cond->assoc_cond->user_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "name=\"%s\"", object);
@@ -6414,6 +6424,8 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(user_cond->def_acct_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "default_acct=\"%s\"", object);
@@ -6428,6 +6440,8 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(user_cond->def_wckey_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "default_wckey=\"%s\"", object);
@@ -6614,6 +6628,8 @@ extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		itr = list_iterator_create(user_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "user=\"%s\"", object);
@@ -6632,6 +6648,8 @@ extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		itr = list_iterator_create(acct_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "acct=\"%s\"", object);
@@ -6762,6 +6780,8 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(acct_cond->assoc_cond->acct_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "name=\"%s\"", object);
@@ -6891,6 +6911,8 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(cluster_cond->cluster_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "name=\"%s\"", object);
@@ -7276,6 +7298,8 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(qos_cond->id_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "id=\"%s\"", object);
@@ -7291,6 +7315,8 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(qos_cond->name_list);
 		while((object = list_next(itr))) {
+			if(!object[0])
+				continue;
 			if(set)
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "name=\"%s\"", object);
@@ -10403,7 +10429,7 @@ extern int clusteracct_storage_p_get_usage(
 		CLUSTER_COUNT
 	};
 
-	if(!cluster_rec->name) {
+	if(!cluster_rec->name || !cluster_rec->name[0]) {
 		error("We need a cluster name to set data for");
 		return SLURM_ERROR;
 	}
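
The guard repeated throughout this file treats an empty string like a
missing one; as a standalone C sketch of the idiom (the helper name is
hypothetical):

    /* reject both NULL and "" (blank) strings */
    static int name_is_usable(const char *name)
    {
        return (name != NULL) && (name[0] != '\0');
    }

which is what expressions such as !object->name || !object->name[0] check
inline before the add is rejected.
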
diff --git a/src/plugins/auth/authd/auth_authd.c b/src/plugins/auth/authd/auth_authd.c
index 498043b8846d6848513ece8df71c4889b119b975..eadd9bb27c892aed53c9588a8404023eac177273 100644
--- a/src/plugins/auth/authd/auth_authd.c
+++ b/src/plugins/auth/authd/auth_authd.c
@@ -87,7 +87,7 @@
  * of "auth/".
  *
  * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incomming
+ * min_plug_version - specifies the minimum version number of incoming
  *                    messages that this plugin can accept
  */
 const char plugin_name[]        = "Brent Chun's authd authentication plugin";
diff --git a/src/plugins/auth/munge/auth_munge.c b/src/plugins/auth/munge/auth_munge.c
index d53f0b7156a3db5f8a35faf1ac13b767a1d729c0..872f4c7a3ebcbc1b547eba581514c5bcc9660c76 100644
--- a/src/plugins/auth/munge/auth_munge.c
+++ b/src/plugins/auth/munge/auth_munge.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  auth_munge.c - SLURM auth implementation via Chris Dunlap's Munge
- *  $Id: auth_munge.c 19095 2009-12-01 22:59:18Z da $
+ *  $Id: auth_munge.c 19303 2010-01-21 22:42:24Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
@@ -97,7 +97,7 @@
  * of "auth/".
  *
  * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incomming
+ * min_plug_version - specifies the minimum version number of incoming
  *                    messages that this plugin can accept
  */
 const char plugin_name[]       	= "auth plugin for Munge "
diff --git a/src/plugins/auth/none/auth_none.c b/src/plugins/auth/none/auth_none.c
index 780dae36da4c433b33b80a26f9e5b9d8b8419c41..5e75aa7b16ce320fb0451f78c2429c976e90cb55 100644
--- a/src/plugins/auth/none/auth_none.c
+++ b/src/plugins/auth/none/auth_none.c
@@ -90,7 +90,7 @@
  * of "auth/".
  *
  * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incomming
+ * min_plug_version - specifies the minimum version number of incoming
  *                    messages that this plugin can accept
  */
 const char plugin_name[]       	= "Null authentication plugin";
diff --git a/src/plugins/sched/wiki/msg.c b/src/plugins/sched/wiki/msg.c
index b496e16d13b8f8d7cc13268ab50e1b95eb999105..0ffb64b2100c951259575c22f3d32db0bdf74a0d 100644
--- a/src/plugins/sched/wiki/msg.c
+++ b/src/plugins/sched/wiki/msg.c
@@ -572,7 +572,7 @@ static int	_parse_msg(char *msg, char **req)
 
 #if 0
 	/* Old wiki interface does not require checksum
-	 * (actually a cryptographic signature) on incomming
+	 * (actually a cryptographic signature) on incoming
 	 * messages.  NOTE: This is not secure! */
 	if (auth_key[0] != '\0') {
 		char sum[20];	/* format is "CK=%08x08x" */
diff --git a/src/plugins/sched/wiki2/hostlist.c b/src/plugins/sched/wiki2/hostlist.c
index ed2be0f4f316f7eb4a2967572b4d074328170bd4..ed78f17c2be4b17ef84d2055a86d79e86c5b3f71 100644
--- a/src/plugins/sched/wiki2/hostlist.c
+++ b/src/plugins/sched/wiki2/hostlist.c
@@ -168,6 +168,14 @@ static char * _task_list(struct job_record *job_ptr)
 	job_resources_t *job_resrcs_ptr = job_ptr->job_resrcs;
 
 	xassert(job_resrcs_ptr);
+#ifdef HAVE_BG
+	if(job_ptr->node_cnt) {
+		task_cnt = ((job_resrcs_ptr->cpu_array_value[0]
+			     * job_resrcs_ptr->cpu_array_reps[0])
+			    / job_ptr->node_cnt);
+	} else
+		task_cnt = 1;
+#endif
 	for (i=0; i<job_resrcs_ptr->nhosts; i++) {
 		if (i == 0) {
 			xassert(job_resrcs_ptr->cpus &&
@@ -188,6 +196,7 @@ static char * _task_list(struct job_record *job_ptr)
 		}
 		host = node_record_table_ptr[node_inx].name;
 
+#ifndef HAVE_BG
 		task_cnt = job_resrcs_ptr->cpus[i];
 		if (job_ptr->details && job_ptr->details->cpus_per_task)
 			task_cnt /= job_ptr->details->cpus_per_task;
@@ -196,6 +205,7 @@ static char * _task_list(struct job_record *job_ptr)
 			      job_ptr->job_id, host);
 			task_cnt = 1;
 		}
+#endif
 		for (j=0; j<task_cnt; j++) {
 			if (buf)
 				xstrcat(buf, ":");
@@ -267,6 +277,14 @@ static char * _task_list_exp(struct job_record *job_ptr)
 	job_resources_t *job_resrcs_ptr = job_ptr->job_resrcs;
 
 	xassert(job_resrcs_ptr);
+#ifdef HAVE_BG
+	if(job_ptr->node_cnt) {
+		task_cnt = ((job_resrcs_ptr->cpu_array_value[0]
+			     * job_resrcs_ptr->cpu_array_reps[0])
+			    / job_ptr->node_cnt);
+	} else
+		task_cnt = 1;
+#endif
 	for (i=0; i<job_resrcs_ptr->nhosts; i++) {
 		if (i == 0) {
 			xassert(job_resrcs_ptr->cpus &&
@@ -287,6 +305,7 @@ static char * _task_list_exp(struct job_record *job_ptr)
 		}
 		host = node_record_table_ptr[node_inx].name;
 
+#ifndef HAVE_BG
 		task_cnt = job_resrcs_ptr->cpus[i];
 		if (job_ptr->details && job_ptr->details->cpus_per_task)
 			task_cnt /= job_ptr->details->cpus_per_task;
@@ -295,6 +314,7 @@ static char * _task_list_exp(struct job_record *job_ptr)
 			      job_ptr->job_id, host);
 			task_cnt = 1;
 		}
+#endif
 		if (reps == task_cnt) {
 			/* append to existing hostlist record */
 			if (hostlist_push(hl_tmp, host) == 0)
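
With hypothetical BlueGene values cpu_array_value[0] = 4,
cpu_array_reps[0] = 8 and node_cnt = 8, the computation added above yields
task_cnt = (4 * 8) / 8 = 4 tasks reported per base partition, replacing the
per-host CPU count used on other systems.
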
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index bd8d50eabbb11159dae73091178b096a175408ed..be35075bd3e2786deffa4008411e325bba68bdb5 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -1425,7 +1425,7 @@ static int _remove_preemptables(List block_list, List preempt_jobs)
 /*
  * Try to find resources for a given job request
  * IN job_ptr - pointer to job record in slurmctld
- * IN/OUT bitmap - nodes availble for assignment to job, clear those not to
+ * IN/OUT bitmap - nodes available for assignment to job, clear those not to
  *	be used
  * IN min_nodes, max_nodes  - minimum and maximum number of nodes to allocate
  *	to this job (considers slurm block limits)
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.h b/src/plugins/select/bluegene/plugin/bg_job_place.h
index 7d0af4bf7fd185175119a63684b5b6b0fe456372..1697a9dd77e5a2ef497879211ab2f0230b788bd9 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.h
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.h
@@ -44,7 +44,7 @@
 /*
  * Try to find resources for a given job request
  * IN job_ptr - pointer to job record in slurmctld
- * IN/OUT bitmap - nodes availble for assignment to job, clear those not to
+ * IN/OUT bitmap - nodes available for assignment to job, clear those not to
  *	           be used
  * IN min_nodes, max_nodes  - minimum and maximum number of nodes to allocate
  *	                      to this job (considers slurm partition limits)
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index ab528990f77fbad40ac65ff6cbca63795a7d9e7a..8470232f698ff47eaec25154e9044ba05f215a6b 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -506,6 +506,8 @@ static void _create_part_data()
 			this_ptr = this_ptr->next;
 		}
 	}
+	list_iterator_destroy(part_iterator);
+
 	/* should we sort the select_part_record list by priority here? */
 }
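
The added call completes the create/destroy pairing used elsewhere in this
diff (a minimal sketch; names follow slurmctld conventions):

    ListIterator itr = list_iterator_create(part_list);
    struct part_record *p_ptr;
    while ((p_ptr = list_next(itr))) {
            /* ... build per-partition data ... */
    }
    list_iterator_destroy(itr);  /* previously omitted, leaking the iterator */
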
 
diff --git a/src/plugins/task/affinity/task_affinity.c b/src/plugins/task/affinity/task_affinity.c
index 8a914d98a30ed21bf2318cc8b3a378be9f789ca7..70286d939efbc86c9fce5f09b9de1466b63c2b63 100644
--- a/src/plugins/task/affinity/task_affinity.c
+++ b/src/plugins/task/affinity/task_affinity.c
@@ -401,7 +401,7 @@ extern int task_pre_launch (slurmd_job_t *job)
 
 /*
  * task_term() is called after termination of application task.
- *	It is preceeded by --task-epilog (from srun command line)
+ *	It is preceded by --task-epilog (from srun command line)
  *	followed by TaskEpilog program (from slurm.conf).
  */
 extern int task_post_term (slurmd_job_t *job)
diff --git a/src/plugins/task/none/task_none.c b/src/plugins/task/none/task_none.c
index 5ab36e3a12c8e4f479f8a7d3e95bd7c51be28174..2df562bd527d47b12e15f0a5223a75466a2e1c18 100644
--- a/src/plugins/task/none/task_none.c
+++ b/src/plugins/task/none/task_none.c
@@ -182,7 +182,7 @@ extern int task_pre_launch (slurmd_job_t *job)
 
 /*
  * task_term() is called after termination of application task.
- *	It is preceeded by --task-epilog (from srun command line)
+ *	It is preceded by --task-epilog (from srun command line)
  *	followed by TaskEpilog program (from slurm.conf).
  */
 extern int task_post_term (slurmd_job_t *job)
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index 1bdac830313c7a7785cf02c00c27a398e85fa944..cb415032e193600eff459f03739961a1d14efd31 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -735,6 +735,14 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 
 	itr = list_iterator_create(name_list);
 	while((name = list_next(itr))) {
+		if(!name[0]) {
+			exit_code=1;
+			fprintf(stderr, " No blank names are "
+				"allowed when adding.\n");
+			rc = SLURM_ERROR;
+			continue;
+		}
+
 		acct = NULL;
 		if(!sacctmgr_find_account_from_list(local_account_list, name)) {
 			acct = xmalloc(sizeof(acct_account_rec_t));
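
This guard (repeated below for clusters and users) rejects empty names before any record is built: an empty string is non-NULL, so a bare pointer check would let it through. A small self-contained sketch of the same test:

#include <stdio.h>

/* A name is usable only if the pointer is set and the string is
 * non-empty, i.e. name[0] != '\0'. */
static int name_is_usable(const char *name)
{
	return name != NULL && name[0] != '\0';
}

int main(void)
{
	const char *names[] = { "alice", "", NULL };
	int i;

	for (i = 0; i < 3; i++) {
		if (!name_is_usable(names[i])) {
			fprintf(stderr, " No blank names are "
				"allowed when adding.\n");
			continue;
		}
		printf("adding %s\n", names[i]);
	}
	return 0;
}
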
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index 29e3f980903e3638e99976aae7ecff12fb1e0bb7..a7b25e8e65deabf01ba17bbb4bf064ad689b5cfa 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -338,6 +338,13 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 	cluster_list = list_create(destroy_acct_cluster_rec);
 	itr = list_iterator_create(name_list);
 	while((name = list_next(itr))) {
+		if(!name[0]) {
+			exit_code=1;
+			fprintf(stderr, " No blank names are "
+				"allowed when adding.\n");
+			rc = SLURM_ERROR;
+			continue;
+		}
 		cluster = xmalloc(sizeof(acct_cluster_rec_t));
 
 		list_append(cluster_list, cluster);
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 49ddb553876b6c6dc2d638db94b003b96f7f86e7..e686b97a7ee67725b5a41d2d0cd7ec3b3bfb1043 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -1060,10 +1060,18 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 
 	itr = list_iterator_create(assoc_cond->user_list);
 	while((name = list_next(itr))) {
+		if(!name[0]) {
+			exit_code=1;
+			fprintf(stderr, " No blank names are "
+				"allowed when adding.\n");
+			rc = SLURM_ERROR;
+			continue;
+		}
+
 		user = NULL;
 		if(!sacctmgr_find_user_from_list(local_user_list, name)) {
 			uid_t pw_uid;
-			if(!default_acct) {
+			if(!default_acct || !default_acct[0]) {
 				exit_code=1;
 				fprintf(stderr, " Need a default account for "
 				       "these users to add.\n");
@@ -2409,7 +2417,7 @@ extern int sacctmgr_delete_coord(int argc, char *argv[])
 		destroy_acct_user_cond(user_cond);
 		return SLURM_ERROR;
 	}
-	/* FIX ME: This list should be recieved from the slurmdbd not
+	/* FIX ME: This list should be received from the slurmdbd not
 	 * just assumed.  Right now it doesn't do it correctly though.
 	 * This is why we are doing it this way.
 	 */
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index b4b75ac9349ca452cefde6123d11dba36c6076ef..2b05f2078b4a6eaaa052fbae265b3fddb58c25e0 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -246,9 +246,9 @@ int main(int argc, char *argv[])
 			      "  Allocation request rescinded.");
 		} else if (opt.immediate &&
 			   ((errno == ETIMEDOUT) ||
+			    (errno == ESLURM_NOT_TOP_PRIORITY) ||
 			    (errno == ESLURM_NODES_BUSY))) {
-			error("Unable to allocate resources: %s",
-			      slurm_strerror(ESLURM_NODES_BUSY));
+			error("Unable to allocate resources: %m");
 			error_exit = immediate_exit;
 		} else {
 			error("Failed to allocate resources: %m");
diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c
index 9a1e970bac5f10a6da11050298dff1fc80e65596..9c461d8af41b0b5272c1dde0d8b358c157b20006 100644
--- a/src/scontrol/scontrol.c
+++ b/src/scontrol/scontrol.c
@@ -491,7 +491,7 @@ _process_command (int argc, char *argv[])
 		return 0;
 	}
 
-if (strncasecmp (tag, "abort", MAX(taglen, 5)) == 0) {
+	if (strncasecmp (tag, "abort", MAX(taglen, 5)) == 0) {
 		/* require full command name */
 		if (argc > 2) {
 			exit_code = 1;
@@ -642,7 +642,8 @@ if (strncasecmp (tag, "abort", MAX(taglen, 5)) == 0) {
 			if (error_code) {
 				exit_code = 1;
 				if (quiet_flag != 1)
-					slurm_perror ("scontrol_checkpoint error");
+					slurm_perror(
+						"scontrol_checkpoint error");
 			}
 		}
 	}
@@ -737,7 +738,9 @@ if (strncasecmp (tag, "abort", MAX(taglen, 5)) == 0) {
 				if (error_code) {
 					exit_code = 1;
 					if (quiet_flag != 1)
-						slurm_perror ("slurm_set_debug_level error");
+						slurm_perror(
+							"slurm_set_debug_level "
+							"error");
 				}
 			}
 		}
@@ -934,7 +937,7 @@ _delete_it (int argc, char *argv[])
 	}
 
 	/* First identify the entity type to delete */
-	if (strncasecmp (tag, "PartitionName", MAX(taglen, 1)) == 0) {
+	if (strncasecmp (tag, "PartitionName", MAX(taglen, 3)) == 0) {
 		delete_part_msg_t part_msg;
 		part_msg.name = val;
 		if (slurm_delete_partition(&part_msg)) {
@@ -942,7 +945,7 @@ _delete_it (int argc, char *argv[])
 			snprintf(errmsg, 64, "delete_partition %s", argv[0]);
 			slurm_perror(errmsg);
 		}
-	} else if (strncasecmp (tag, "ReservationName", MAX(taglen, 1)) == 0) {
+	} else if (strncasecmp (tag, "ReservationName", MAX(taglen, 3)) == 0) {
 		reservation_name_msg_t   res_msg;
 		res_msg.name = val;
 		if (slurm_delete_reservation(&res_msg)) {
@@ -950,7 +953,7 @@ _delete_it (int argc, char *argv[])
 			snprintf(errmsg, 64, "delete_reservation %s", argv[0]);
 			slurm_perror(errmsg);
 		}
-	} else if (strncasecmp (tag, "BlockName", MAX(taglen, 1)) == 0) {
+	} else if (strncasecmp (tag, "BlockName", MAX(taglen, 3)) == 0) {
 #ifdef HAVE_BG
 		update_block_msg_t   block_msg;
 		slurm_init_update_block_msg ( &block_msg );
@@ -1090,19 +1093,21 @@ _update_it (int argc, char *argv[])
 		taglen = val - argv[i];
 		val++;
 
-		if (strncasecmp (tag, "NodeName", MAX(taglen, 5)) == 0) {
+		if (!strncasecmp(tag, "NodeName", MAX(taglen, 3))) {
 			nodetag=1;
-		} else if (strncasecmp (tag, "PartitionName", MAX(taglen, 3)) == 0) {
+		} else if (!strncasecmp(tag, "PartitionName", MAX(taglen, 3))) {
 			partag=1;
-		} else if (strncasecmp (tag, "JobId", MAX(taglen, 3)) == 0) {
+		} else if (!strncasecmp(tag, "JobId", MAX(taglen, 1))) {
 			jobtag=1;
-		} else if (strncasecmp (tag, "BlockName", MAX(taglen, 3)) == 0) {
+		} else if (!strncasecmp(tag, "BlockName", MAX(taglen, 3))) {
 			blocktag=1;
-		} else if (strncasecmp (tag, "SubBPName", MAX(taglen, 3)) == 0) {
+		} else if (!strncasecmp(tag, "SubBPName", MAX(taglen, 3))) {
 			subtag=1;
-		} else if (strncasecmp (tag, "ReservationName", MAX(taglen, 3)) == 0) {
+		} else if (!strncasecmp(tag, "ReservationName",
+					MAX(taglen, 3))) {
 			restag=1;
-		} else if (strncasecmp (tag, "SlurmctldDebug", MAX(taglen, 3)) == 0) {
+		} else if (!strncasecmp(tag, "SlurmctldDebug",
+					MAX(taglen, 2))) {
 			debugtag=1;
 		}
 	}
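
The strncasecmp(tag, keyword, MAX(taglen, N)) idiom accepts case-insensitive abbreviations of at least N characters; this hunk mostly tightens those minimum lengths so shorter, ambiguous prefixes no longer match. A self-contained sketch:

#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strncasecmp */

#define MAX(a,b) ((a) > (b) ? (a) : (b))

/* Match a user-supplied tag against a keyword, requiring at least
 * min_len characters so "par" selects PartitionName but "p" does not. */
static int tag_matches(const char *tag, const char *keyword, size_t min_len)
{
	size_t taglen = strlen(tag);
	return strncasecmp(tag, keyword, MAX(taglen, min_len)) == 0;
}

int main(void)
{
	printf("%d\n", tag_matches("par", "PartitionName", 3));   /* 1 */
	printf("%d\n", tag_matches("p", "PartitionName", 3));	   /* 0 */
	printf("%d\n", tag_matches("RES", "ReservationName", 3)); /* 1 */
	return 0;
}
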
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index a256de111e25173b4f7868e06ef6e5608364825b..408df3f46a72d9f799d9b0d5b83d911ddaab66ac 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -1368,14 +1368,20 @@ static void *_slurmctld_background(void *no_data)
 /* save_all_state - save entire slurmctld state for later recovery */
 void save_all_state(void)
 {
+	char *save_loc;
+
 	/* Each of these functions lock their own databases */
 	schedule_job_save();
 	schedule_node_save();
 	schedule_part_save();
 	schedule_resv_save();
 	schedule_trigger_save();
-	select_g_state_save(slurmctld_conf.state_save_location);
-	dump_assoc_mgr_state(slurmctld_conf.state_save_location);
+
+	if ((save_loc = slurm_get_state_save_location())) {
+		select_g_state_save(save_loc);
+		dump_assoc_mgr_state(save_loc);
+		xfree(save_loc);
+	}
 }
 
 /* send all info for the controller to accounting */
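
save_all_state() now looks up the state-save directory at call time, so a path changed by reconfiguration is honored, and it frees the returned copy. A sketch of that get/use/free shape, with a hypothetical getter standing in for slurm_get_state_save_location():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in: returns a freshly allocated copy of the
 * current setting, or NULL on failure. */
static char *get_state_save_location(void)
{
	return strdup("/var/spool/slurmctld");
}

static void state_save(const char *dir)
{
	printf("saving state under %s\n", dir);
}

int main(void)
{
	char *save_loc = get_state_save_location();

	/* Use the value only if the lookup succeeded, then release it,
	 * the same shape as the fixed save_all_state(). */
	if (save_loc) {
		state_save(save_loc);
		free(save_loc);
	}
	return 0;
}
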
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index ea723c0b3104ff30bddc0382cf23071c52c6ddb0..5109b9a9215a13c9ed2aa84479d087e74e38e840 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -1297,6 +1297,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer)
 	job_ptr->details->std_err = err;
 	job_ptr->details->exc_nodes = exc_nodes;
 	job_ptr->details->features = features;
+	(void) build_feature_list(job_ptr);
 	job_ptr->details->std_in = in;
 	job_ptr->details->job_min_cpus = job_min_cpus;
 	job_ptr->details->job_min_memory = job_min_memory;
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index ae3ea61cfad76f2b413cb59d149add36e429827b..935ee1be726d33dd9e93e1967503459115580576 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -863,41 +863,34 @@ int update_node ( update_node_msg_t * update_node_msg )
  */
 extern void restore_node_features(void)
 {
-	int i, j, update_cnt = 0;
+	int i, j;
 	char *node_list;
+	struct node_record *node_ptr1, *node_ptr2;
 
-	/* Identify all nodes that have features field
-	 * preserved and not explicitly set in slurm.conf
-	 * to a different value */
-	for (i=0; i<node_record_count; i++) {
-		if (!node_record_table_ptr[i].features)
-			continue;
-		if (node_record_table_ptr[i].config_ptr->feature) {
-			/* use Features explicitly set in slurm.conf */
-			continue;
-		}
-		update_cnt++;
-	}
-	if (update_cnt == 0)
-		return;
+	for (i=0, node_ptr1=node_record_table_ptr; i<node_record_count; 
+	     i++, node_ptr1++) {
 
-	for (i=0; i<node_record_count; i++) {
-		if (!node_record_table_ptr[i].features)
-			continue;
-		node_list = xstrdup(node_record_table_ptr[i].name);
+		if (!node_ptr1->features)
+			continue;	/* No feature to preserve */
+		if (node_ptr1->config_ptr->feature &&
+		    !strcmp(node_ptr1->config_ptr->feature,
+			    node_ptr1->features)) {
+			continue;	/* Identical feature value */
+		}
 
-		for (j=(i+1); j<node_record_count; j++) {
-			if (!node_record_table_ptr[j].features ||
-			    strcmp(node_record_table_ptr[i].features,
-			    node_record_table_ptr[j].features))
+		node_list = xstrdup(node_ptr1->name);
+		for (j=(i+1), node_ptr2=(node_ptr1+1); j<node_record_count; 
+		     j++, node_ptr2++) {
+			if (!node_ptr2->features ||
+			    strcmp(node_ptr1->features, node_ptr2->features))
 				continue;
 			xstrcat(node_list, ",");
-			xstrcat(node_list, node_record_table_ptr[j].name);
-			xfree(node_record_table_ptr[j].features);
+			xstrcat(node_list, node_ptr2->name);
 		}
-		_update_node_features(node_list,
-			node_record_table_ptr[i].features);
-		xfree(node_record_table_ptr[i].features);
+		error("Node %s Features(%s) differ from slurm.conf",
+		      node_list, node_ptr1->features);
+		_update_node_features(node_list, node_ptr1->features);
+		xfree(node_ptr1->features);
 		xfree(node_list);
 	}
 }
@@ -1063,7 +1056,7 @@ static int _update_node_features(char *node_names, char *features)
 	list_iterator_destroy(config_iterator);
 	bit_free(node_bitmap);
 
-	info("_update_node_features: nodes %s reason set to: %s",
+	info("_update_node_features: nodes %s features set to: %s",
 		node_names, features);
 	return SLURM_SUCCESS;
 }
@@ -1403,10 +1396,11 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 			char time_str[32];
 			last_node_update = now;
 			slurm_make_time_str(&now, time_str, sizeof(time_str));
-			xfree(node_ptr->reason);
-			node_ptr->reason = xstrdup_printf(
-				"Node silently failed and came back [slurm@%s]",
-				time_str);
+			if(!node_ptr->reason)
+				node_ptr->reason = xstrdup_printf(
+					"Node silently failed and "
+					"came back [slurm@%s]",
+					time_str);
 			info("Node %s silently failed and came back",
 			     reg_msg->node_name);
 			_make_node_down(node_ptr, last_node_update);
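
restore_node_features() now batches nodes sharing an identical feature string into one comma-separated list, so each distinct feature set triggers a single update call. A condensed sketch of the grouping loop (a done[] flag stands in for the bookkeeping the real code gets from comparing against slurm.conf):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name[]     = { "tux0", "tux1", "tux2", "tux3" };
	const char *features[] = { "gpu",  "big",  "gpu",  NULL };
	char node_list[256];
	int done[4] = {0};
	int i, j;

	for (i = 0; i < 4; i++) {
		if (!features[i] || done[i])
			continue;	/* nothing to preserve, or handled */
		snprintf(node_list, sizeof(node_list), "%s", name[i]);
		for (j = i + 1; j < 4; j++) {
			if (!features[j] ||
			    strcmp(features[i], features[j]))
				continue;
			strcat(node_list, ",");
			strcat(node_list, name[j]);
			done[j] = 1;
		}
		/* one update per distinct feature string */
		printf("update %s -> Features=%s\n", node_list, features[i]);
	}
	return 0;
}
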
diff --git a/src/slurmctld/ping_nodes.c b/src/slurmctld/ping_nodes.c
index 079122a2922f4650932d08dd238ef4e052da386b..19c172806a56dee94acb1a8b18094fdaf86f1f41 100644
--- a/src/slurmctld/ping_nodes.c
+++ b/src/slurmctld/ping_nodes.c
@@ -210,7 +210,7 @@ void ping_nodes (void)
 		 * this mechanism avoids an additional (per node) timer or
 		 * counter and gets updated configuration information
 		 * once in a while). We limit these requests since they
-		 * can generate a flood of incomming RPCs. */
+		 * can generate a flood of incoming RPCs. */
 		if (IS_NODE_UNKNOWN(node_ptr) || restart_flag ||
 		    ((i >= offset) && (i < (offset + max_reg_threads)))) {
 			hostlist_push(reg_agent_args->hostlist,
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index f2b771dec5b50b2e166258967d75644d6026680a..c4a8a7fbec9efddfd9095635cbc6ea48295dea2f 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  proc_req.c - process incomming messages to slurmctld
+ *  proc_req.c - process incoming messages to slurmctld
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
diff --git a/src/slurmctld/proc_req.h b/src/slurmctld/proc_req.h
index 6810e42d3fbd4b790b12522e21e249fbd959e037..e4ae9d35e4310b5b17238179a31bd93f8f79bf82 100644
--- a/src/slurmctld/proc_req.h
+++ b/src/slurmctld/proc_req.h
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  proc_msg.h - process incomming message functions
+ *  proc_msg.h - process incoming message functions
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 2dbb96927c220b30501d144a58129f83ed02fca5..88945bf8d5c2e0ceb81b399d50cd98a6f894b090 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -1545,7 +1545,7 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 			  that doesn't have as many cpus as we decided
 			  we needed for each task.  This would result
 			  in not getting a task for the node we
-			  recieved.  This is usually in error.  This
+			  received.  This is usually in error.  This
 			  only happens when the person doesn't specify
 			  how many cpus_per_task they want, and we
 			  have to come up with a number, in this case
@@ -1590,7 +1590,19 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer)
 	int task_cnt;
 	char *node_list = NULL;
 	time_t begin_time, run_time;
-
+	bitstr_t *pack_bitstr;
+#ifdef HAVE_FRONT_END
+	/* On front-end systems a step is only
+	 * assigned the first node of the
+	 * allocation, so we make it appear to
+	 * run on the job's entire node space
+	 * (which it effectively does).
+	 */
+	task_cnt = step_ptr->job_ptr->num_procs;
+	node_list = step_ptr->job_ptr->nodes;
+	pack_bitstr = step_ptr->job_ptr->node_bitmap;
+#else
+	pack_bitstr = step_ptr->step_node_bitmap;
 	if (step_ptr->step_layout) {
 		task_cnt = step_ptr->step_layout->task_cnt;
 		node_list = step_ptr->step_layout->node_list;
@@ -1598,6 +1610,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer)
 		task_cnt = step_ptr->job_ptr->num_procs;
 		node_list = step_ptr->job_ptr->nodes;
 	}
+#endif
 	pack32(step_ptr->job_ptr->job_id, buffer);
 	pack32(step_ptr->step_id, buffer);
 	pack16(step_ptr->ckpt_interval, buffer);
@@ -1628,9 +1641,8 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer)
 	packstr(node_list, buffer);
 	packstr(step_ptr->name, buffer);
 	packstr(step_ptr->network, buffer);
-	pack_bit_fmt(step_ptr->step_node_bitmap, buffer);
+	pack_bit_fmt(pack_bitstr, buffer);
 	packstr(step_ptr->ckpt_dir, buffer);
-
 }
 
 /*
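
On front-end systems a step is launched from a single front-end node even though it spans the whole job, so the pack routine above substitutes job-level values under HAVE_FRONT_END. A compile-time sketch of that selection (define FRONT_END to mimic such a build):

#include <stdio.h>

struct job  { int task_cnt; const char *nodes; };
struct step { int task_cnt; const char *nodes; const struct job *job; };

static void pack_step(const struct step *s)
{
#ifdef FRONT_END
	/* Report the whole job's extent; the step really uses it all. */
	printf("tasks=%d nodes=%s\n", s->job->task_cnt, s->job->nodes);
#else
	printf("tasks=%d nodes=%s\n", s->task_cnt, s->nodes);
#endif
}

int main(void)
{
	struct job  j = { 1024, "bg[000x333]" };
	struct step s = { 1, "bgfen0", &j };

	pack_step(&s);
	return 0;
}
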
diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c
index 3c64e3d6a853f637bc8b9d521f04bd44048c735f..cdfea0f636ab7b903617710f920ff86547a163c0 100644
--- a/src/slurmdbd/proc_req.c
+++ b/src/slurmdbd/proc_req.c
@@ -1548,7 +1548,7 @@ static int _get_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
 	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	   < ACCT_ADMIN_SUPER_USER) {
+	   < ACCT_ADMIN_OPERATOR) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
 		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
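
Lowering the threshold from ACCT_ADMIN_SUPER_USER to ACCT_ADMIN_OPERATOR works as a one-token change because the admin levels form an ordered enum, with higher values carrying more privilege. A sketch with hypothetical values mirroring that ordering:

#include <stdio.h>

/* Hypothetical ordering mirroring ACCT_ADMIN_*: a larger value means
 * more privilege, so "level < required" rejects the caller. */
enum admin_level { ADMIN_NONE = 1, ADMIN_OPERATOR = 2, ADMIN_SUPER = 3 };

static int may_list_wckeys(enum admin_level level)
{
	return level >= ADMIN_OPERATOR;	/* was ADMIN_SUPER before the fix */
}

int main(void)
{
	printf("operator: %d\n", may_list_wckeys(ADMIN_OPERATOR)); /* 1 */
	printf("none:     %d\n", may_list_wckeys(ADMIN_NONE));	   /* 0 */
	return 0;
}
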
diff --git a/src/sreport/cluster_reports.c b/src/sreport/cluster_reports.c
index 9d6db115103ed732175560d60351d1cfcb913d44..895a38f4e490a33a01b98afa7adc0fb452415589 100644
--- a/src/sreport/cluster_reports.c
+++ b/src/sreport/cluster_reports.c
@@ -1017,7 +1017,7 @@ extern int cluster_user_by_account(int argc, char *argv[])
 					uid = passwd_ptr->pw_uid;
 				/* In this report we are using the sreport user
 				   structure to store the information we want
-				   since it is already avaliable and will do
+				   since it is already available and will do
 				   pretty much what we want.
 				*/
 				sreport_user =
@@ -1288,7 +1288,7 @@ extern int cluster_user_by_wckey(int argc, char *argv[])
 				uid = passwd_ptr->pw_uid;
 			/* In this report we are using the sreport user
 			   structure to store the information we want
-			   since it is already avaliable and will do
+			   since it is already available and will do
 			   pretty much what we want.
 			*/
 			sreport_user =	xmalloc(sizeof(sreport_user_rec_t));
diff --git a/src/srun/opt.c b/src/srun/opt.c
index 7a787e7f1d07f6b91f1bbb3d69bded4198895f52..a223268c7c71b9ac74a3722563ddb364ed6495d5 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -328,6 +328,8 @@ static void _opt_default()
 	opt.ntasks_per_socket    = NO_VAL;
 	opt.ntasks_per_core      = NO_VAL;
 	opt.nodes_set = false;
+	opt.nodes_set_env = false;
+	opt.nodes_set_opt = false;
 	opt.cpu_bind_type = 0;
 	opt.cpu_bind = NULL;
 	opt.mem_bind_type = 0;
@@ -594,13 +596,15 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_NODES:
-		opt.nodes_set = get_resource_arg_range( val ,"OPT_NODES",
-							&opt.min_nodes,
-							&opt.max_nodes, false);
-		if (opt.nodes_set == false) {
+		opt.nodes_set_env = get_resource_arg_range( val ,"OPT_NODES",
+							     &opt.min_nodes,
+							     &opt.max_nodes, 
+							     false);
+		if (opt.nodes_set_env == false) {
 			error("\"%s=%s\" -- invalid node count. ignoring...",
 			      e->var, val);
-		}
+		} else
+			opt.nodes_set = opt.nodes_set_env;
 		break;
 
 	case OPT_OVERCOMMIT:
@@ -962,17 +966,18 @@ static void set_options(const int argc, char **argv)
 				_get_int(optarg, "number of tasks", true);
 			break;
 		case (int)'N':
-			opt.nodes_set =
+			opt.nodes_set_opt =
 				get_resource_arg_range( optarg,
 							"requested node count",
 							&opt.min_nodes,
 							&opt.max_nodes, true );
 
-			if (opt.nodes_set == false) {
+			if (opt.nodes_set_opt == false) {
 				error("invalid resource allocation -N `%s'",
 				      optarg);
 				exit(error_exit);
-			}
+			} else
+				opt.nodes_set = opt.nodes_set_opt;
 			break;
 		case (int)'o':
 			if (opt.pty) {
@@ -1649,11 +1654,12 @@ static bool _opt_verify(void)
 	   && (!opt.nodes_set || !opt.nprocs_set)) {
 		hostlist_t hl = hostlist_create(opt.nodelist);
 		if(!opt.nprocs_set) {
-			opt.nprocs_set = 1;
+			opt.nprocs_set = true;
 			opt.nprocs = hostlist_count(hl);
 		}
 		if(!opt.nodes_set) {
-			opt.nodes_set = 1;
+			opt.nodes_set = true;
+			opt.nodes_set_opt = true;
 			hostlist_uniq(hl);
 			opt.min_nodes = opt.max_nodes = hostlist_count(hl);
 		}
@@ -1782,6 +1788,7 @@ static bool _opt_verify(void)
 		else {
 			opt.min_nodes = hl_cnt;
 			opt.nodes_set = true;
+			opt.nodes_set_opt = true;
 		}
 	}
 	if ((opt.nodes_set || opt.extra_set)				&&
@@ -1824,6 +1831,7 @@ static bool _opt_verify(void)
 			else {
 				opt.min_nodes = hl_cnt;
 				opt.nodes_set = true;
+				opt.nodes_set_opt = true;
 			}
 			/* don't destroy hl here since it could be
 			   used later
@@ -1848,6 +1856,7 @@ static bool _opt_verify(void)
 			      opt.nprocs, opt.min_nodes, opt.nprocs);
 
 			opt.min_nodes = opt.nprocs;
+			opt.nodes_set_opt = true;
 			if (opt.max_nodes
 			    &&  (opt.min_nodes > opt.max_nodes) )
 				opt.max_nodes = opt.min_nodes;
diff --git a/src/srun/opt.h b/src/srun/opt.h
index e3762d412b41d348d1333d6a25e53108997e07c4..f0c1213ce46a58d7f097ed9fa5caf8dbcfb5821b 100644
--- a/src/srun/opt.h
+++ b/src/srun/opt.h
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  opt.h - definitions for srun option processing
- *  $Id: opt.h 19275 2010-01-19 23:50:05Z jette $
+ *  $Id: opt.h 19316 2010-01-26 00:12:13Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
@@ -110,6 +110,9 @@ typedef struct srun_options {
 	mem_bind_type_t mem_bind_type; /* --mem_bind=		*/
 	char *mem_bind;		/* binding map for map/mask_mem	*/
 	bool nodes_set;		/* true if nodes explicitly set */
+	bool nodes_set_env;	/* true if nodes set via SLURM_NNODES */
+	bool nodes_set_opt;	/* true if nodes explicitly set using 
+				 * command line option */
 	bool extra_set;		/* true if extra node info explicitly set */
 	int  time_limit;	/* --time,   -t	(int minutes)	*/
 	char *time_limit_str;	/* --time,   -t (string)	*/
diff --git a/src/srun/srun.c b/src/srun/srun.c
index cce0d907910250556276e4e59b51687b90821367..b073d691c97411de6e0e24df2170fc6e759274e2 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -275,6 +275,22 @@ int srun(int ac, char **av)
 	} else if ((resp = existing_allocation())) {
 
 		job_id = resp->job_id;
+		if (opt.nodes_set_env && !opt.nodes_set_opt &&
+		    (opt.min_nodes > resp->node_cnt)) {
+			/* This signifies that the job used the --no-kill
+			 * option and a node went DOWN, or that it used a
+			 * node count range specification, was checkpointed
+			 * at one size and restarted at a different size */
+			error("SLURM_NNODES environment variable "
+			      "conflicts with allocated node count (%u!=%u).",
+			      opt.min_nodes, resp->node_cnt);
+			/* Modify options to match resource allocation.
+			 * NOTE: Some options are not supported */
+			opt.min_nodes = resp->node_cnt;
+			xfree(opt.alloc_nodelist);
+			if (!opt.nprocs_set)
+				opt.nprocs = opt.min_nodes;
+		}
 		if (opt.alloc_nodelist == NULL)
                        opt.alloc_nodelist = xstrdup(resp->node_list);
 		if (opt.exclusive)
@@ -283,10 +299,10 @@ int srun(int ac, char **av)
 		job = job_step_create_allocation(resp);
 		slurm_free_resource_allocation_response_msg(resp);
 
-		if (opt.begin != 0)
+		if (opt.begin != 0) {
 			error("--begin is ignored because nodes"
-				" are already allocated.");
-
+			      " are already allocated.");
+		}
 		if (!job || create_job_step(job, false) < 0)
 			exit(error_exit);
 	} else {
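
The new nodes_set_env/nodes_set_opt flags let srun distinguish a node count inherited from SLURM_NNODES from one given with -N, and quietly correct only the former when the live allocation has shrunk. A condensed sketch of that reconciliation:

#include <stdio.h>
#include <stdbool.h>

struct opts {
	bool nodes_set_env;	/* count came from SLURM_NNODES */
	bool nodes_set_opt;	/* count came from -N on the command line */
	unsigned min_nodes;
};

/* If the count came only from the environment and exceeds what the
 * allocation now holds (e.g. --no-kill and a node went DOWN), fall
 * back to the allocation's real size instead of failing the step. */
static void reconcile(struct opts *o, unsigned alloc_nodes)
{
	if (o->nodes_set_env && !o->nodes_set_opt &&
	    (o->min_nodes > alloc_nodes)) {
		fprintf(stderr, "node count %u != allocation %u; adjusting\n",
			o->min_nodes, alloc_nodes);
		o->min_nodes = alloc_nodes;
	}
}

int main(void)
{
	struct opts o = { true, false, 4 };

	reconcile(&o, 3);			/* allocation lost a node */
	printf("min_nodes=%u\n", o.min_nodes);	/* 3 */
	return 0;
}
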
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index 9a9bfa6f715be521396df079bfbff000a6e49c90..9ebb15c3b3af01b3f2fc783de429b452b1fdc572 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -1968,7 +1968,7 @@ static void _layout_step_record(GtkTreeView *treeview,
 		secs2time_str(now_time, tmp_time, sizeof(tmp_time));
 		nodes = step_ptr->nodes;
 #ifdef HAVE_BG
-		convert_num_unit((float)step_ptr->num_tasks,
+		convert_num_unit((float)step_ptr->num_tasks / cpus_per_node,
 				 tmp_char, sizeof(tmp_char), UNIT_NONE);
 #else
 		convert_num_unit((float)_nodes_in_list(nodes),
@@ -2034,7 +2034,7 @@ static void _update_step_record(job_step_info_t *step_ptr,
 	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
 	if(!step_ptr->nodes
 	   || !strcasecmp(step_ptr->nodes,"waiting...")) {
-		sprintf(tmp_char,"00:00:00");
+		sprintf(tmp_time,"00:00:00");
 		nodes = "waiting...";
 		state = JOB_PENDING;
 	} else {
@@ -2042,7 +2042,7 @@ static void _update_step_record(job_step_info_t *step_ptr,
 		secs2time_str(now_time, tmp_time, sizeof(tmp_time));
 		nodes = step_ptr->nodes;
 #ifdef HAVE_BG
-		convert_num_unit((float)step_ptr->num_tasks,
+		convert_num_unit((float)step_ptr->num_tasks / cpus_per_node,
 				 tmp_char, sizeof(tmp_char), UNIT_NONE);
 #else
 		convert_num_unit((float)_nodes_in_list(nodes),
@@ -2315,9 +2315,6 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 	char tmp_char[50];
 #endif
 
-#ifdef HAVE_FRONT_END
-	int count = 0;
-#endif
 	if(!changed && info_list) {
 		goto update_color;
 	}
@@ -2357,41 +2354,17 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 			/* keep a different string here so we don't
 			   just keep tacking on ionodes to a node list */
 			sview_job_info_ptr->nodes = xstrdup(tmp_char);
-		}
+		} else
+			sview_job_info_ptr->nodes = xstrdup(job_ptr->nodes);
+#else
+		sview_job_info_ptr->nodes = xstrdup(job_ptr->nodes);
 #endif
 		if(!sview_job_info_ptr->node_cnt)
 			sview_job_info_ptr->node_cnt = _get_node_cnt(job_ptr);
 
-#ifdef HAVE_FRONT_END
-		/* set this up to copy it if we are on a front end
-		   system */
-		count = 0;
-		while(job_ptr->node_inx[count] != -1)
-			count++;
-		count++; // for the -1;
-#endif
-
 		for(j = 0; j < step_info_ptr->job_step_count; j++) {
 			step_ptr = &(step_info_ptr->job_steps[j]);
 			if(step_ptr->job_id == job_ptr->job_id) {
-#ifdef HAVE_FRONT_END
-				/* On front end systems the steps are only
-				 * given the first node to run off of
-				 * so we need to make them appear like
-				 * they are running on the entire
-				 * space (which they really are).
-				 */
-				xfree(step_ptr->nodes);
-				step_ptr->nodes =
-					xstrdup(sview_job_info_ptr->nodes);
-				step_ptr->num_tasks =
-					sview_job_info_ptr->node_cnt;
-				xfree(step_ptr->node_inx);
-				step_ptr->node_inx =
-					xmalloc(sizeof(int) * count);
-				memcpy(step_ptr->node_inx, job_ptr->node_inx,
-				       sizeof(int) * count);
-#endif
 				list_append(sview_job_info_ptr->step_list,
 					    step_ptr);
 			}
diff --git a/src/sview/part_info.c b/src/sview/part_info.c
index 5833191d2d50f1f7a038f17f4351e1706c5839e8..9deab6e41b7bd4ab3436cd5cf0bd4b31d86c0d9c 100644
--- a/src/sview/part_info.c
+++ b/src/sview/part_info.c
@@ -259,11 +259,11 @@ static void _set_active_combo_part(GtkComboBox *combo,
 
 		break;
 	case SORTID_SHARE:
-		if(!strcmp(temp_char, "yes"))
+		if(!strncmp(temp_char, "force", 5))
 			action = 0;
 		else if(!strcmp(temp_char, "no"))
 			action = 1;
-		else if(!strcmp(temp_char, "force"))
+		else if(!strncmp(temp_char, "yes", 3))
 			action = 2;
 		else if(!strcmp(temp_char, "exclusive"))
 			action = 3;
@@ -307,6 +307,51 @@ end_it:
 
 }
 
+static uint16_t _set_part_share_popup()
+{
+	GtkWidget *table = gtk_table_new(1, 2, FALSE);
+	GtkWidget *label = NULL;
+	GtkObject *adjustment = gtk_adjustment_new(4,
+						   1, 1000,
+						   1, 60,
+						   0);
+	GtkWidget *spin_button =
+		gtk_spin_button_new(GTK_ADJUSTMENT(adjustment), 1, 0);
+	GtkWidget *popup = gtk_dialog_new_with_buttons(
+		"Count",
+		GTK_WINDOW (main_window),
+		GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
+		NULL);
+	int response = 0;
+	uint16_t count = 4;
+
+	label = gtk_dialog_add_button(GTK_DIALOG(popup),
+				      GTK_STOCK_OK, GTK_RESPONSE_OK);
+	gtk_window_set_default(GTK_WINDOW(popup), label);
+
+	label = gtk_label_new("Shared Job Count ");
+
+	gtk_container_set_border_width(GTK_CONTAINER(table), 10);
+
+	gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox),
+			   table, FALSE, FALSE, 0);
+
+	gtk_table_attach_defaults(GTK_TABLE(table), label, 0, 1, 0, 1);
+	gtk_table_attach_defaults(GTK_TABLE(table), spin_button, 1, 2, 0, 1);
+
+	gtk_widget_show_all(popup);
+	response = gtk_dialog_run (GTK_DIALOG(popup));
+
+	if (response == GTK_RESPONSE_OK) {
+		count = gtk_spin_button_get_value_as_int(
+			GTK_SPIN_BUTTON(spin_button));
+	}
+
+	gtk_widget_destroy(popup);
+
+	return count;
+}
+
 /* don't free this char */
 static const char *_set_part_msg(update_part_msg_t *part_msg,
 				 const char *new_text,
@@ -384,11 +429,12 @@ static const char *_set_part_msg(update_part_msg_t *part_msg,
 		break;
 	case SORTID_SHARE:
 		if (!strcasecmp(new_text, "yes")) {
-			 part_msg->max_share = 4;
+			part_msg->max_share = _set_part_share_popup();
 		} else if (!strcasecmp(new_text, "exclusive")) {
 			part_msg->max_share = 0;
 		} else if (!strcasecmp(new_text, "force")) {
-			part_msg->max_share = SHARED_FORCE | 4;
+			part_msg->max_share =
+				_set_part_share_popup() | SHARED_FORCE;
 		} else {	/* "no" */
 			part_msg->max_share = 1;
 		}
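
sview's Share edits now feed a user-chosen count into max_share, which packs the whole sharing policy into one value: 0 is exclusive, 1 is no sharing, a larger count allows that many jobs, and a high bit forces sharing. A sketch of encoding and decoding, with a stand-in for SLURM's SHARED_FORCE bit:

#include <stdint.h>
#include <stdio.h>

#define SHARED_FORCE 0x8000	/* stand-in for SLURM's flag bit */

static void describe_share(uint16_t max_share)
{
	uint16_t count = max_share & ~SHARED_FORCE;

	if (max_share == 0)
		printf("exclusive\n");
	else if (count == 1)
		printf("no sharing\n");
	else if (max_share & SHARED_FORCE)
		printf("force sharing, up to %u jobs\n", count);
	else
		printf("sharing allowed, up to %u jobs\n", count);
}

int main(void)
{
	describe_share(0);			/* exclusive */
	describe_share(1);			/* no */
	describe_share(4);			/* yes, count from the popup */
	describe_share(SHARED_FORCE | 4);	/* force, count from popup */
	return 0;
}
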
diff --git a/testsuite/expect/test7.11 b/testsuite/expect/test7.11
index 66f1d911b9f41ec385e5d391f9b0abb13ab3f4c0..5cc9fff1299e8d8d70337816959afe0a901b6d18 100755
--- a/testsuite/expect/test7.11
+++ b/testsuite/expect/test7.11
@@ -52,6 +52,10 @@ if {[test_aix] == 1} {
 	send_user "WARNING: Test is incompatible with AIX\n"
 	exit 0
 }
+if {[test_bluegene] == 1} {
+	send_user "WARNING: Test is incompatible with Blue Gene\n"
+	exit 0
+}
 
 #
 # Build the plugin
diff --git a/testsuite/expect/test8.8 b/testsuite/expect/test8.8
index bd5cf76509eb831fdb7fb515350b3aba73cf0e60..8503431a6d2af6e3d9bd8039efdfde64fd9dbc34 100755
--- a/testsuite/expect/test8.8
+++ b/testsuite/expect/test8.8
@@ -125,7 +125,7 @@ proc get_block_nodes { block } {
 			exp_continue
 		}
 		timeout {
-			send_user "\nFAILURE: smap not responding\n"
+			send_user "\nFAILURE: scontrol not responding\n"
 			slow_kill $my_pid
 			set exit_code 1
 		}
@@ -158,7 +158,7 @@ proc get_first_node { } {
 			exp_continue
 		}
 		timeout {
-			send_user "\nFAILURE: smap not responding\n"
+			send_user "\nFAILURE: scontrol not responding\n"
 			slow_kill $my_pid
 			set exit_code 1
 		}
@@ -189,7 +189,7 @@ proc is_block_in_state { block state } {
 			exp_continue
 		}
 		timeout {
-			send_user "\nFAILURE: smap not responding\n"
+			send_user "\nFAILURE: scontrol not responding\n"
 			slow_kill $my_pid
 			set exit_code 1
 		}
@@ -213,7 +213,7 @@ proc check_node { node error_cnt alloc_cnt } {
 	set alloc_procs [expr $alloc_cnt * $procs_per_cnode]
 	set total_procs 0
 	set match 0
-# 	send_user "really looking for $error_procs $alloc_procs\n"
+#	send_user "really looking for $error_procs $alloc_procs\n"
 	send "$scontrol show node $node\r"
 	expect {
 		-nocase -re "CPUAlloc=$alloc_procs" {
@@ -236,7 +236,7 @@ proc check_node { node error_cnt alloc_cnt } {
 			exp_continue
 		}
 		timeout {
-			send_user "\nFAILURE: smap not responding\n"
+			send_user "\nFAILURE: scontrol not responding\n"
 			set exit_code 1
 		}
 		eof {
@@ -246,7 +246,7 @@ proc check_node { node error_cnt alloc_cnt } {
 
 	if {$match != 3} {
 		send_user "\nFAILURE: scontrol didn't give correct cpu counts $match\n"
-		return 1
+		exit 1
 	}
 
 	set total_cnt [expr ($total_procs / $procs_per_cnode) - $alloc_cnt - $error_cnt]
@@ -317,7 +317,7 @@ proc check_node { node error_cnt alloc_cnt } {
 			exp_continue
 		}
 		timeout {
-			send_user "\nFAILURE: smap not responding\n"
+			send_user "\nFAILURE: sinfo not responding\n"
 			set exit_code 1
 		}
 		eof {
@@ -526,6 +526,10 @@ if {$exit_code} {
 if {![string compare $layout "Dynamic"]} {
 	# lets make 1 ionode in an error state
 	set exit_code [change_subbp_state $first_node "0" "error"]
+	# sleep briefly here; querying slurm immediately after the state
+	# change does not always return the correct answer
+	sleep 1
+
 	#allocate something with that node
 	set block [allocate_and_quit $smallest $first_node $error_block 1]
 
diff --git a/testsuite/slurm_unit/api/manual/node_info-tst.c b/testsuite/slurm_unit/api/manual/node_info-tst.c
index fe8becdca49fd56a8b5294da0fac2fdd885716f0..788f8f7d57b135f1f96d429ca154d24c5e022cae 100644
--- a/testsuite/slurm_unit/api/manual/node_info-tst.c
+++ b/testsuite/slurm_unit/api/manual/node_info-tst.c
@@ -57,7 +57,7 @@ main (int argc, char *argv[])
 		if ((i < 10) || (i % 200 == 0) || 
 		    ((i + 1)  == node_info_msg_ptr-> record_count)) {
 			slurm_print_node_table ( stdout, & node_info_msg_ptr ->
-							 node_array[i], 0 ) ;
+							 node_array[i], 1, 0 ) ;
 		}
 		else if ((i==10) || (i % 200 == 1))
 			printf ("skipping...\n");
diff --git a/testsuite/slurm_unit/api/manual/submit-tst.c b/testsuite/slurm_unit/api/manual/submit-tst.c
index b2b38623e786345d0a36b3f8d7fe82185f000be2..3ebc19eed2a78fce749b4e538ab31199f98f1e0a 100644
--- a/testsuite/slurm_unit/api/manual/submit-tst.c
+++ b/testsuite/slurm_unit/api/manual/submit-tst.c
@@ -46,7 +46,7 @@ main (int argc, char *argv[])
 	slurm_init_job_desc_msg( &job_mesg );
 	job_mesg. contiguous = 1; 
 	job_mesg. name = ("job01");
-	job_mesg. job_min_procs = 1;
+	job_mesg. num_procs = 1;
 	job_mesg. job_min_memory = 100;
 	job_mesg. job_min_tmp_disk = 200;
 	job_mesg. priority = 100;
@@ -55,9 +55,9 @@ main (int argc, char *argv[])
 	job_mesg. min_nodes = 1;
 	job_mesg. user_id = getuid();
 	job_mesg. script = "#!/bin/csh\n/bin/hostname\n";
-	job_mesg. err = "/tmp/slurm.stderr";
-	job_mesg. in = "/tmp/slurm.stdin";
-	job_mesg. out = "/tmp/slurm.stdout";
+	job_mesg. std_err = "/tmp/slurm.stderr";
+	job_mesg. std_in = "/tmp/slurm.stdin";
+	job_mesg. std_out = "/tmp/slurm.stdout";
 	job_mesg. work_dir = "/tmp\0";
 	job_mesg. env_size = 2;
 	env[0] = "SLURM_ENV_0=looking_good";
@@ -83,7 +83,7 @@ main (int argc, char *argv[])
 		slurm_init_job_desc_msg( &job_mesg );
 		job_mesg. contiguous = 1; 
 		job_mesg. name = ("job02+");
-		job_mesg. job_min_procs = 1;
+		job_mesg. num_procs = 1;
 		job_mesg. job_min_memory = 100 + i;
 		job_mesg. job_min_tmp_disk = 200 + i;
 		job_mesg. priority = 100 + i;