From 541cd20f3f1199473bdabdaa63fe58e368c3e09a Mon Sep 17 00:00:00 2001
From: Mehdi Dogguy <mehdi@debian.org>
Date: Mon, 8 Sep 2014 21:32:16 +0200
Subject: [PATCH] Imported Upstream version 1.3.15

---
 META                                          |    4 +-
 NEWS                                          |   82 +-
 auxdir/x_ac_affinity.m4                       |    1 +
 auxdir/x_ac_bluegene.m4                       |    8 +-
 auxdir/x_ac_gtk.m4                            |   26 +-
 configure                                     | 2730 +++++++++--------
 configure.ac                                  |   14 +-
 contribs/Makefile.am                          |    2 +-
 contribs/Makefile.in                          |    2 +-
 contribs/make.slurm.patch                     |    2 +-
 contribs/perlapi/libslurm-perl/Slurm.xs       |    6 +
 contribs/python/hostlist/CHANGES              |   19 +
 contribs/python/hostlist/PKG-INFO             |    4 +-
 contribs/python/hostlist/README               |   28 +-
 contribs/python/hostlist/hostlist.py          |    4 +-
 contribs/python/hostlist/python-hostlist.spec |    4 +-
 contribs/python/hostlist/setup.py             |   31 +-
 contribs/slurmdb-direct/Makefile.am           |   39 +
 contribs/slurmdb-direct/Makefile.in           |  450 +++
 contribs/slurmdb-direct/config.slurmdb.pl     |   15 +
 contribs/slurmdb-direct/moab_2_slurmdb.pl     |  275 ++
 contribs/torque/Makefile.am                   |    2 +-
 contribs/torque/Makefile.in                   |    2 +-
 doc/html/Makefile.am                          |    2 +-
 doc/html/Makefile.in                          |    2 +-
 doc/html/accounting.shtml                     |  284 +-
 doc/html/accounting_storageplugins.shtml      |  885 ++++++
 doc/html/bluegene.shtml                       |   56 +-
 doc/html/documentation.shtml                  |    2 +-
 doc/html/jobacct_storageplugins.shtml         |  204 --
 doc/html/overview.shtml                       |    5 +-
 doc/html/programmer_guide.shtml               |   11 +-
 doc/html/review_release.html                  |    2 +-
 doc/html/team.shtml                           |    5 +-
 doc/man/man1/sacct.1                          |  114 +-
 doc/man/man1/sacctmgr.1                       |    2 +-
 doc/man/man1/salloc.1                         |   33 +
 doc/man/man1/sbatch.1                         |   33 +-
 doc/man/man1/scontrol.1                       |    7 +-
 doc/man/man1/squeue.1                         |    5 +
 doc/man/man1/sreport.1                        |  136 +-
 doc/man/man1/srun.1                           |   19 +-
 doc/man/man5/bluegene.conf.5                  |   75 +-
 doc/man/man5/slurm.conf.5                     |   13 +-
 doc/man/man5/slurmdbd.conf.5                  |   61 +-
 doc/man/man8/spank.8                          |   22 +-
 etc/bluegene.conf.example                     |    2 +
 etc/slurmdbd.conf.example                     |   39 +
 slurm.spec                                    |   25 +-
 slurm/slurm.h.in                              |    6 +-
 slurm/slurm_errno.h                           |    1 +
 src/api/job_info.c                            |   10 +
 src/common/assoc_mgr.c                        |  104 +-
 src/common/checkpoint.c                       |    7 +-
 src/common/env.c                              |   41 +-
 src/common/jobacct_common.c                   |    5 +-
 src/common/mpi.c                              |    6 +-
 src/common/node_select.c                      |   34 +-
 src/common/parse_time.c                       |   45 +
 src/common/parse_time.h                       |    3 +
 src/common/proc_args.c                        |   14 +-
 src/common/read_config.c                      |   37 +-
 src/common/read_config.h                      |   10 +-
 src/common/slurm_accounting_storage.c         |    5 +-
 src/common/slurm_accounting_storage.h         |    2 +-
 src/common/slurm_auth.c                       |    6 +-
 src/common/slurm_cred.c                       |    7 +-
 src/common/slurm_errno.c                      |    2 +
 src/common/slurm_jobacct_gather.c             |    5 +-
 src/common/slurm_jobcomp.c                    |    5 +-
 src/common/slurm_protocol_api.c               |    2 +-
 .../slurm_protocol_socket_implementation.c    |    4 +-
 src/common/slurmdbd_defs.c                    |   35 +-
 src/common/switch.c                           |    5 +-
 src/common/uid.c                              |   20 +-
 src/common/uid.h                              |    8 +-
 src/database/mysql_common.c                   |   55 +-
 src/database/mysql_common.h                   |    1 +
 .../filetxt/accounting_storage_filetxt.c      |    6 +-
 .../filetxt/filetxt_jobacct_process.c         |  105 +-
 .../mysql/accounting_storage_mysql.c          |  369 ++-
 .../mysql/mysql_jobacct_process.c             |   35 +-
 .../accounting_storage/mysql/mysql_rollup.c   |    2 +-
 .../pgsql/accounting_storage_pgsql.c          |    2 +-
 .../pgsql/pgsql_jobacct_process.c             |    2 +
 .../slurmdbd/accounting_storage_slurmdbd.c    |    6 +-
 src/plugins/proctrack/rms/proctrack_rms.c     |    2 +-
 src/plugins/sched/backfill/backfill_wrapper.c |    3 +-
 src/plugins/sched/wiki/job_modify.c           |   28 +-
 src/plugins/sched/wiki/msg.c                  |    4 +-
 src/plugins/sched/wiki/start_job.c            |   14 +
 src/plugins/sched/wiki2/get_jobs.c            |    9 +-
 src/plugins/sched/wiki2/job_modify.c          |   15 +
 src/plugins/sched/wiki2/job_requeue.c         |   26 +-
 src/plugins/sched/wiki2/job_will_run.c        |    3 +-
 src/plugins/sched/wiki2/msg.c                 |   25 +-
 src/plugins/sched/wiki2/start_job.c           |   14 +
 .../block_allocator/block_allocator.c         |  385 ++-
 .../block_allocator/block_allocator.h         |   26 +-
 .../select/bluegene/plugin/bg_block_info.c    |  168 +-
 .../select/bluegene/plugin/bg_job_place.c     |  258 +-
 .../select/bluegene/plugin/bg_job_run.c       |  166 +-
 .../select/bluegene/plugin/bg_job_run.h       |    2 +
 .../bluegene/plugin/bg_record_functions.c     |  703 +++--
 .../bluegene/plugin/bg_record_functions.h     |   20 +-
 .../bluegene/plugin/bg_switch_connections.c   |  342 +--
 .../select/bluegene/plugin/block_sys.c        |  316 +-
 src/plugins/select/bluegene/plugin/bluegene.c |  417 ++-
 src/plugins/select/bluegene/plugin/bluegene.h |   21 +-
 .../select/bluegene/plugin/defined_block.c    |   78 +-
 .../select/bluegene/plugin/dynamic_block.c    |  554 +---
 .../select/bluegene/plugin/dynamic_block.h    |    5 +-
 .../select/bluegene/plugin/select_bluegene.c  |  301 +-
 src/plugins/select/bluegene/plugin/sfree.c    |   14 +-
 .../select/bluegene/plugin/slurm_prolog.c     |    5 +-
 .../select/bluegene/plugin/state_test.c       |  369 ++-
 .../select/bluegene/plugin/state_test.h       |    7 +-
 src/plugins/select/cons_res/select_cons_res.c |   10 +-
 src/plugins/select/linear/select_linear.c     |   32 +-
 src/sacct/options.c                           |   69 +-
 src/sacct/print.c                             |   81 +-
 src/sacct/sacct.c                             |    1 +
 src/sacct/sacct.h                             |    1 +
 src/sacctmgr/account_functions.c              |   18 +-
 src/sacctmgr/association_functions.c          |   15 +-
 src/sacctmgr/cluster_functions.c              |   10 +-
 src/sacctmgr/file_functions.c                 |   20 +-
 src/sacctmgr/sacctmgr.c                       |   12 +-
 src/sacctmgr/user_functions.c                 |   27 +-
 src/salloc/opt.c                              |   56 +-
 src/salloc/salloc.c                           |   12 +-
 src/sbatch/opt.c                              |   30 +-
 src/sinfo/opts.c                              |    2 +-
 src/slurmctld/acct_policy.c                   |   12 +-
 src/slurmctld/controller.c                    |  120 +-
 src/slurmctld/job_mgr.c                       |  284 +-
 src/slurmctld/job_scheduler.c                 |    4 +-
 src/slurmctld/node_scheduler.c                |    5 +-
 src/slurmctld/proc_req.c                      |    3 +-
 src/slurmctld/read_config.c                   |    2 +-
 src/slurmctld/sched_plugin.c                  |    5 +-
 src/slurmctld/slurmctld.h                     |   15 +-
 src/slurmctld/step_mgr.c                      |    4 +-
 src/slurmctld/trigger_mgr.c                   |   17 +-
 src/slurmd/common/proctrack.c                 |    6 +-
 src/slurmd/common/task_plugin.c               |    5 +-
 src/slurmd/slurmd/req.c                       |   29 +-
 src/slurmd/slurmd/slurmd.c                    |   15 +-
 src/slurmd/slurmstepd/mgr.c                   |    3 +-
 src/slurmd/slurmstepd/slurmstepd.c            |    4 +-
 src/slurmd/slurmstepd/task.c                  |   32 +-
 src/slurmdbd/proc_req.c                       |   14 +-
 src/slurmdbd/read_config.c                    |    2 +-
 src/slurmdbd/rpc_mgr.c                        |    2 +-
 src/smap/configure_functions.c                |  228 +-
 src/smap/job_functions.c                      |   10 +-
 src/smap/partition_functions.c                |   18 +
 src/smap/smap.c                               |    4 +-
 src/squeue/opts.c                             |   31 +-
 src/squeue/print.c                            |   21 +-
 src/squeue/sort.c                             |   21 +-
 src/squeue/squeue.c                           |   28 +-
 src/squeue/squeue.h                           |    2 +
 src/sreport/cluster_reports.c                 |   25 +-
 src/sreport/job_reports.c                     |  525 +++-
 src/sreport/job_reports.h                     |    1 +
 src/sreport/sreport.c                         |  111 +-
 src/sreport/user_reports.c                    |   12 +-
 src/srun/opt.c                                |   49 +-
 src/srun/srun.c                               |    1 +
 src/sstat/options.c                           |    7 +-
 src/sstat/sstat.c                             |   18 +-
 src/sstat/sstat.h                             |    1 +
 src/sview/block_info.c                        |   20 +
 src/sview/common.c                            |   75 +-
 testsuite/expect/test1.59                     |    1 -
 testsuite/expect/test12.2                     |   11 +-
 testsuite/expect/test19.5                     |    2 +-
 178 files changed, 8820 insertions(+), 4487 deletions(-)
 create mode 100644 contribs/slurmdb-direct/Makefile.am
 create mode 100644 contribs/slurmdb-direct/Makefile.in
 create mode 100644 contribs/slurmdb-direct/config.slurmdb.pl
 create mode 100755 contribs/slurmdb-direct/moab_2_slurmdb.pl
 create mode 100644 doc/html/accounting_storageplugins.shtml
 delete mode 100644 doc/html/jobacct_storageplugins.shtml
 create mode 100644 etc/slurmdbd.conf.example

diff --git a/META b/META
index 2904e9149..dda32ed2f 100644
--- a/META
+++ b/META
@@ -3,9 +3,9 @@
   Api_revision:  0
   Major:         1
   Meta:          1
-  Micro:         13
+  Micro:         15
   Minor:         3
   Name:          slurm
   Release:       1
   Release_tags:  dist
-  Version:       1.3.13
+  Version:       1.3.15
diff --git a/NEWS b/NEWS
index eaa7b7a0c..b1ae1ded5 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,81 @@
 This file describes changes in recent versions of SLURM. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in SLURM 1.3.16
+=========================
+
+* Changes in SLURM 1.3.15
+=========================
+ -- Fix bug in squeue command with sort on job name ("-S j" option) for jobs
+    that lack a name. Previously generated an invalid memory reference.
+ -- Permit the TaskProlog to write to the job's standard output by writing
+    a line containing the prefix "print " to it's standard output.
+ -- Fix for making the slurmdbd agent thread start up correctly when 
+    stopped and then started again.
+ -- Add squeue option to report jobs by account (-U or --account). Patch from
+    Par Andersson, National Supercomputer Centre, Sweden.
+ -- Add -DNUMA_VERSION1_COMPATIBILITY to Makefile CFLAGS for proper behavior
+    when building with NUMA version 2 APIs.
+ -- BLUEGENE - slurm works on a BGP system.
+ -- BLUEGENE - slurm handles HTC blocks
+ -- BLUEGENE - Added option DenyPassthrough in the bluegene.conf.  Can be set
+    to any combination of X,Y,Z to not allow passthroughs when running in 
+    dynamic layout mode.
+ -- Fix bug in logic to remove a job's dependency, could result in abort.
+ -- Add new error message to sched/wiki and sched/wiki2 (Maui and Moab) for
+    STARTJOB request: "TASKLIST includes non-responsive nodes".
+ -- Fix bug in task layout for heterogeneous nodes and srun --exclusive
+    option.
+ -- Fix bug in select/linear when used with sched/gang that can result in a 
+    job's required or excluded node specification being ignored.
+ -- Add logic to handle message connect timeouts (timed-out.patch from 
+    Chuck Clouston, Bull).
+ -- BLUEGENE - CFLAGS=-m64 is no longer required in configure
+ -- Update python-hostlist code from Kent Engström (NSC) to v1.5
+    - Add hostgrep utility to search for lines matching a hostlist.
+    - Make each "-" on the command line count as one hostlist argument.
+      If multiple hostslists are given on stdin they are combined to a
+      union hostlist before being used in the way requested by the
+      options.
+ -- When using -j option in sacct no user restriction will applied unless
+    specified with the -u option.
+ -- For sched/wiki and sched/wiki2, change logging of wiki message traffic
+    from debug() to debug2(). Only seen if SlurmctldDebug is configured to
+    6 or higher.
+ -- Significant speed up for association based reports in sreport
+ -- BLUEGENE - fix for checking if job can run with downed nodes.  Previously 
+    sbatch etc would tell you node configuration not available now jobs are 
+    accepted but held until nodes are back up.
+ -- Fix in accounting so if any nodes are removed from the system when they 
+    were previously down will be recorded correctly.
+ -- For sched/wiki2 (Moab), add flag to note if job is restartable and
+    prevent deadlock of job requeue fails.
+ -- Modify squeue to return non-zero exit code on failure. Patch from
+    Par Andersson (NSC).
+ -- Correct logic in select/cons_res to allocate a job the maximum node
+    count from a range rather than minimum (e.g. "sbatch -N1-4 my.sh").
+ -- In accounting_storage/filetxt and accounting_storage/pgsql fix 
+    possible invalid memory reference when a job lacks a name.
+ -- Give srun command an exit code of 1 if the prolog fails.
+ -- BLUEGENE - allows for checking nodecard states in the system instead 
+    of midplane state so as to not down an entire midplane if you don't 
+    have to.
+ -- BLUEGENE - fix creation of MESH blocks 
+ -- BLUEGENE - on job cancellation we call jm_cancel_job and then wait until
+    the system cleans up the job.  Before we would send a SIGKILL right 
+    at the beginning. 
+ -- BLUEGENE - if a user specifies a node count that can not be met the job 
+    will be refused instead of before the plugin would search for the next 
+    larger size that could be created.  This prevents users asking for 
+    things that can't be created, and then getting something back they might 
+    not be expecting.
+
+* Changes in SLURM 1.3.14
+=========================
+ -- SECURITY BUG: Fix in sbcast logic that permits users to write files based
+    upon supplimental groups of the slurmd daemon. Similar logic for event
+    triggers if slurmctld is run as user root (not typical).
+
 * Changes in SLURM 1.3.13
 =========================
  -- Added ability for slurmdbd to archive and purge step and/or job records.
@@ -17,7 +92,7 @@ documents those changes that are of interest to users and admins.
     associated with that partition rather than blocking all partitions with
     any overlapping nodes).
  -- Correct logic to log in a job's stderr that it was "CANCELLED DUE TO 
-    NODE FAILURE" rather than just "CANCLLED".
+    NODE FAILURE" rather than just "CANCELLED".
  -- Fix to crypto/openssl plugin that could result in job launch requests
     being spoofed through the use of an improperly formed credential. This bug 
     could permit a user to launch tasks on compute nodes not allocated for 
@@ -160,7 +235,7 @@ documents those changes that are of interest to users and admins.
     given user with a given name will execute with this dependency type.
     From Matthieu Hautreux, CEA.
  -- Updated contribs/python/hostlist to version 1.3: See "CHANGES" file in
-    that directory for details. From Kent Engstrom, NSC.
+    that directory for details. From Kent Engström, NSC.
  -- Add SLURM_JOB_NAME environment variable for jobs submitted using sbatch.
     In order to prevent the job steps from all having the same name as the 
     batch job that spawned them, the SLURM_JOB_NAME environment variable is
@@ -272,6 +347,7 @@ documents those changes that are of interest to users and admins.
     slurmctld daemon and leave the slurmd daemons running.
  -- Do not require JobCredentialPrivateKey or JobCredentialPublicCertificate
     in slurm.conf if using CryptoType=crypto/munge.
+ -- Remove SPANK support from sbatch. 
 
 * Changes in SLURM 1.3.6
 ========================
@@ -3678,4 +3754,4 @@ documents those changes that are of interest to users and admins.
  -- Change directory to /tmp in slurmd if daemonizing.
  -- Logfiles are reopened on reconfigure.
  
-$Id: NEWS 16197 2009-01-13 17:34:26Z jette $
+$Id: NEWS 17225 2009-04-10 19:25:52Z da $
diff --git a/auxdir/x_ac_affinity.m4 b/auxdir/x_ac_affinity.m4
index 3e191de9d..ad2725bf9 100644
--- a/auxdir/x_ac_affinity.m4
+++ b/auxdir/x_ac_affinity.m4
@@ -40,6 +40,7 @@ AC_DEFUN([X_AC_AFFINITY], [
   AM_CONDITIONAL(HAVE_NUMA, test "x$ac_have_numa" = "xyes")
   if test "x$ac_have_numa" = "xyes"; then
     AC_DEFINE(HAVE_NUMA, 1, [define if numa library installed])
+    CFLAGS="-DNUMA_VERSION1_COMPATIBILITY $CFLAGS"
   else
     AC_MSG_WARN([Unable to locate NUMA memory affinity functions])
   fi
diff --git a/auxdir/x_ac_bluegene.m4 b/auxdir/x_ac_bluegene.m4
index 6efe8d976..1d24f5922 100644
--- a/auxdir/x_ac_bluegene.m4
+++ b/auxdir/x_ac_bluegene.m4
@@ -1,5 +1,5 @@
 ##*****************************************************************************
-## $Id: x_ac_bluegene.m4 16156 2009-01-07 20:57:41Z jette $
+## $Id: x_ac_bluegene.m4 16697 2009-02-26 19:49:53Z da $
 ##*****************************************************************************
 #  AUTHOR:
 #    Morris Jette <jette1@llnl.gov>
@@ -80,13 +80,14 @@ AC_DEFUN([X_AC_BGL],
 		# Test to make sure the api is good
                 have_bg_files=yes
       		saved_LDFLAGS="$LDFLAGS"
-      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags"
+      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64"
          	AC_LINK_IFELSE([AC_LANG_PROGRAM([[ int rm_set_serial(char *); ]], [[ rm_set_serial(""); ]])],[have_bg_files=yes],[AC_MSG_ERROR(There is a problem linking to the BG/L api.)])
 		LDFLAGS="$saved_LDFLAGS"         	
    	fi
 
   	if test ! -z "$have_bg_files" ; then
       		BG_INCLUDES="$bg_includes"
+	        CFLAGS="$CFLAGS -m64"
       		AC_DEFINE(HAVE_3D, 1, [Define to 1 if 3-dimensional architecture])
       		AC_DEFINE(HAVE_BG, 1, [Define to 1 if emulating or running on Blue Gene system])
       		AC_DEFINE(HAVE_BGL, 1, [Define to 1 if emulating or running on Blue Gene/L system])
@@ -160,13 +161,14 @@ AC_DEFUN([X_AC_BGP],
       		# ac_with_readline="no"
 		# Test to make sure the api is good
                 saved_LDFLAGS="$LDFLAGS"
-      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags"
+      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64"
          	AC_LINK_IFELSE([AC_LANG_PROGRAM([[ int rm_set_serial(char *); ]], [[ rm_set_serial(""); ]])],[have_bgp_files=yes],[AC_MSG_ERROR(There is a problem linking to the BG/P api.)])
 		LDFLAGS="$saved_LDFLAGS"         	
    	fi
 
   	if test ! -z "$have_bgp_files" ; then
       		BG_INCLUDES="$bg_includes"
+	        CFLAGS="$CFLAGS -m64"
       		AC_DEFINE(HAVE_3D, 1, [Define to 1 if 3-dimensional architecture])
       		AC_DEFINE(HAVE_BG, 1, [Define to 1 if emulating or running on Blue Gene system])
       		AC_DEFINE(HAVE_BGP, 1, [Define to 1 if emulating or running on Blue Gene/P system])
diff --git a/auxdir/x_ac_gtk.m4 b/auxdir/x_ac_gtk.m4
index 7ab71b2d0..a7cc39f2d 100644
--- a/auxdir/x_ac_gtk.m4
+++ b/auxdir/x_ac_gtk.m4
@@ -18,6 +18,12 @@ AC_DEFUN([X_AC_GTK],
     ac_have_gtk="yes"
     _x_ac_pkcfg_bin="no"
 
+    # use the correct libs if running on 64bit
+    if test -d "/usr/lib64/pkgconfig"; then
+	    PKG_CONFIG_PATH="/usr/lib64/pkgconfig/"
+    fi
+ 
+
 ### Check for pkg-config program
     AC_ARG_WITH(
 	    [pkg-config],
@@ -46,24 +52,20 @@ AC_DEFUN([X_AC_GTK],
 #    fi
 
 
-### Check for gtk2.7.1 package
+### Check for min gtk package
     if test "$ac_have_gtk" == "yes" ; then
         $HAVEPKGCONFIG --exists gtk+-2.0
         if ! test $? -eq 0 ; then
             AC_MSG_WARN([*** gtk+-2.0 is not available.])
             ac_have_gtk="no"
 	else
-	   gtk_config_major_version=`$HAVEPKGCONFIG --modversion gtk+-2.0 | \
-             sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[0-9]]*\)/\1/'`
-    	   gtk_config_minor_version=`$HAVEPKGCONFIG --modversion gtk+-2.0 | \
-             sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[0-9]]*\)/\2/'`
-    	   gtk_config_micro_version=`$HAVEPKGCONFIG --modversion gtk+-2.0 | \
-             sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[0-9]]*\)/\3/'`
-
-	   if test $gtk_config_major_version -lt 2 || test $gtk_config_minor_version -lt 7 || test $gtk_config_micro_version -lt 1; then
-	   	AC_MSG_WARN([*** gtk+-$gtk_config_major_version.$gtk_config_minor_version.$gtk_config_micro_version available, we need >= gtk+-2.7.1 installed for sview.])
-            	ac_have_gtk="no"
-	   fi
+	    min_gtk_version="2.7.1"
+	    $HAVEPKGCONFIG --atleast-version=$min_gtk_version gtk+-2.0
+	    if ! test $? -eq 0 ; then
+		    gtk_config_version=`$HAVEPKGCONFIG --modversion gtk+-2.0`
+		    AC_MSG_WARN([*** gtk+-$gtk_config_version available, we need >= gtk+-$min_gtk_version installed for sview.])
+		    ac_have_gtk="no"
+	    fi
         fi
     fi
 
diff --git a/configure b/configure
index f08a3a160..836c1e323 100755
--- a/configure
+++ b/configure
@@ -852,13 +852,6 @@ am__untar
 MAINTAINER_MODE_TRUE
 MAINTAINER_MODE_FALSE
 MAINT
-CMD_LDFLAGS
-LIB_LDFLAGS
-SO_LDFLAGS
-HAVE_AIX_TRUE
-HAVE_AIX_FALSE
-HAVE_AIX
-PROCTRACKDIR
 CC
 CFLAGS
 LDFLAGS
@@ -875,6 +868,17 @@ AMDEPBACKSLASH
 CCDEPMODE
 am__fastdepCC_TRUE
 am__fastdepCC_FALSE
+BG_INCLUDES
+BLUEGENE_LOADED_TRUE
+BLUEGENE_LOADED_FALSE
+BLUEGENE_LOADED
+CMD_LDFLAGS
+LIB_LDFLAGS
+SO_LDFLAGS
+HAVE_AIX_TRUE
+HAVE_AIX_FALSE
+HAVE_AIX
+PROCTRACKDIR
 CPP
 GREP
 EGREP
@@ -915,10 +919,6 @@ HAVE_UNSETENV_FALSE
 PTHREAD_CC
 PTHREAD_LIBS
 PTHREAD_CFLAGS
-BG_INCLUDES
-BLUEGENE_LOADED_TRUE
-BLUEGENE_LOADED_FALSE
-BLUEGENE_LOADED
 SEMAPHORE_SOURCES
 SEMAPHORE_LIBS
 NCURSES
@@ -1566,8 +1566,12 @@ Optional Features:
   --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
   --enable-maintainer-mode  enable make rules and dependencies not useful
 			  (and sometimes confusing) to the casual installer
+  --enable-bluegene-emulation
+                          deprecated use --enable-bgl-emulation
+  --enable-bgl-emulation  Run SLURM in BGL mode on a non-bluegene system
   --disable-dependency-tracking  speeds up one-time build
   --enable-dependency-tracking   do not reject slow dependency extractors
+  --enable-bgp-emulation  Run SLURM in BG/P mode on a non-bluegene system
   --disable-largefile     omit support for large files
   --enable-shared[=PKGS]  build shared libraries [default=yes]
   --enable-static[=PKGS]  build static libraries [default=yes]
@@ -1576,10 +1580,6 @@ Optional Features:
   --disable-libtool-lock  avoid locking (might break parallel builds)
   --enable-pam            enable PAM (Pluggable Authentication Modules)
                           support
-  --enable-bluegene-emulation
-                          deprecated use --enable-bgl-emulation
-  --enable-bgl-emulation  Run SLURM in BGL mode on a non-bluegene system
-  --enable-bgp-emulation  Run SLURM in BG/P mode on a non-bluegene system
   --enable-sun-const      enable Sun Constellation system support
   --enable-debug          enable debugging code for development
   --enable-memory-leak-debug
@@ -1594,14 +1594,14 @@ Optional Features:
 Optional Packages:
   --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
   --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-db2-dir=PATH     Specify path to parent directory of DB2 library
+  --with-bg-serial=NAME   set BG_SERIAL value
+
   --with-proctrack=PATH   Specify path to proctrack sources
   --with-gnu-ld           assume the C compiler uses GNU ld [default=no]
   --with-pic              try to use only PIC/non-PIC objects [default=use
                           both]
   --with-tags[=TAGS]      include additional configurations [automatic]
-  --with-db2-dir=PATH     Specify path to parent directory of DB2 library
-  --with-bg-serial=NAME   set BG_SERIAL value
-
   --with-xcpu=PATH        specify path to XCPU directory
   --with-pkg-config=PATH  Specify path to pkg-config binary
   --with-mysql_config=PATH
@@ -3922,137 +3922,533 @@ fi
 
 
 
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-{ echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5
-echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6; }
-# On Suns, sometimes $CPP names a directory.
-if test -n "$CPP" && test -d "$CPP"; then
-  CPP=
+
+	ac_bluegene_loaded=no
+
+
+# Check whether --with-db2-dir was given.
+if test "${with_db2_dir+set}" = set; then
+  withval=$with_db2_dir;  trydb2dir=$withval
 fi
-if test -z "$CPP"; then
-  if test "${ac_cv_prog_CPP+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-      # Double quotes because CPP needs to be expanded
-    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
-    do
-      ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
+
+
+	# test for bluegene emulation mode
+
+  	# Check whether --enable-bluegene-emulation was given.
+if test "${enable_bluegene_emulation+set}" = set; then
+  enableval=$enable_bluegene_emulation;  case "$enableval" in
+	  yes) bluegene_emulation=yes ;;
+	  no)  bluegene_emulation=no ;;
+	  *)   { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-bluegene-emulation" >&5
+echo "$as_me: error: bad value \"$enableval\" for --enable-bluegene-emulation" >&2;}
+   { (exit 1); exit 1; }; }  ;;
+    	esac
+fi
+
+
+  	# Check whether --enable-bgl-emulation was given.
+if test "${enable_bgl_emulation+set}" = set; then
+  enableval=$enable_bgl_emulation;  case "$enableval" in
+	  yes) bgl_emulation=yes ;;
+	  no)  bgl_emulation=no ;;
+	  *)   { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-bgl-emulation" >&5
+echo "$as_me: error: bad value \"$enableval\" for --enable-bgl-emulation" >&2;}
+   { (exit 1); exit 1; }; }  ;;
+    	esac
+fi
+
+
+	if test "x$bluegene_emulation" = "xyes" -o "x$bgl_emulation" = "xyes"; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_3D 1
 _ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BG 1
 _ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  :
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
 
-  # Broken: fails on valid input.
-continue
-fi
 
-rm -f conftest.err conftest.$ac_ext
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BGL 1
+_ACEOF
 
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat >conftest.$ac_ext <<_ACEOF
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_FRONT_END 1
+_ACEOF
+
+    		{ echo "$as_me:$LINENO: Running in BG/L emulation mode" >&5
+echo "$as_me: Running in BG/L emulation mode" >&6;}
+		bg_default_dirs=""
+ 		#define ac_bluegene_loaded so we don't load another bluegene conf
+		ac_bluegene_loaded=yes
+	else
+  	   	bg_default_dirs="/bgl/BlueLight/ppcfloor/bglsys /opt/IBM/db2/V8.1 /u/bgdb2cli/sqllib /home/bgdb2cli/sqllib"
+	fi
+
+   	for bg_dir in $trydb2dir "" $bg_default_dirs; do
+      	# Skip directories that don't exist
+      		if test ! -z "$bg_dir" -a ! -d "$bg_dir" ; then
+         		continue;
+      		fi
+
+      		# Search for required BG API libraries in the directory
+      		if test -z "$have_bg_ar" -a -f "$bg_dir/lib64/libbglbridge.so" ; then
+         		have_bg_ar=yes
+			bg_bridge_so="$bg_dir/lib64/libbglbridge.so"
+       	 		bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -lbglbridge -lbgldb -ltableapi -lbglmachine -lexpat -lsaymessage"
+        	fi
+
+      		# Search for required DB2 library in the directory
+      		if test -z "$have_db2" -a -f "$bg_dir/lib64/libdb2.so" ; then
+         		have_db2=yes
+	 	 	bg_db2_so="$bg_dir/lib64/libdb2.so"
+       	 		bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -ldb2"
+       		fi
+
+      		# Search for headers in the directory
+      		if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then
+         		have_bg_hdr=yes
+         		bg_includes="-I$bg_dir/include"
+      		fi
+   	done
+
+   	if test ! -z "$have_bg_ar" -a ! -z "$have_bg_hdr" -a ! -z "$have_db2" ; then
+      		# ac_with_readline="no"
+		# Test to make sure the api is good
+                have_bg_files=yes
+      		saved_LDFLAGS="$LDFLAGS"
+      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64"
+         	cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-#include <ac_nonexistent.h>
+ int rm_set_serial(char *);
+int
+main ()
+{
+ rm_set_serial("");
+  ;
+  return 0;
+}
 _ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
 case "(($ac_try" in
   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
   *) ac_try_echo=$ac_try;;
 esac
 eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+  (eval "$ac_link") 2>conftest.er1
   ac_status=$?
   grep -v '^ *+' conftest.er1 >conftest.err
   rm -f conftest.er1
   cat conftest.err >&5
   echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+  (exit $ac_status); } && {
+	 test -z "$ac_c_werror_flag" ||
 	 test ! -s conftest.err
-       }; then
-  # Broken: success on invalid input.
-continue
+       } && test -s conftest$ac_exeext &&
+       $as_test_x conftest$ac_exeext; then
+  have_bg_files=yes
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-  # Passes both tests.
-ac_preproc_ok=:
-break
+	{ { echo "$as_me:$LINENO: error: There is a problem linking to the BG/L api." >&5
+echo "$as_me: error: There is a problem linking to the BG/L api." >&2;}
+   { (exit 1); exit 1; }; }
 fi
 
-rm -f conftest.err conftest.$ac_ext
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+      conftest$ac_exeext conftest.$ac_ext
+		LDFLAGS="$saved_LDFLAGS"
+   	fi
 
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then
-  break
-fi
+  	if test ! -z "$have_bg_files" ; then
+      		BG_INCLUDES="$bg_includes"
+	        CFLAGS="$CFLAGS -m64"
 
-    done
-    ac_cv_prog_CPP=$CPP
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_3D 1
+_ACEOF
 
-fi
-  CPP=$ac_cv_prog_CPP
-else
-  ac_cv_prog_CPP=$CPP
-fi
-{ echo "$as_me:$LINENO: result: $CPP" >&5
-echo "${ECHO_T}$CPP" >&6; }
-ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BG 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BGL 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_FRONT_END 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BG_FILES 1
+_ACEOF
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define BG_BRIDGE_SO "$bg_bridge_so"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define BG_DB2_SO "$bg_db2_so"
+_ACEOF
+
+		{ echo "$as_me:$LINENO: checking for BG serial value" >&5
+echo $ECHO_N "checking for BG serial value... $ECHO_C" >&6; }
+      		bg_serial="BGL"
+
+# Check whether --with-bg-serial was given.
+if test "${with_bg_serial+set}" = set; then
+  withval=$with_bg_serial; bg_serial="$withval"
+fi
+
+     		{ echo "$as_me:$LINENO: result: $bg_serial" >&5
+echo "${ECHO_T}$bg_serial" >&6; }
+
+cat >>confdefs.h <<_ACEOF
+#define BG_SERIAL "$bg_serial"
+_ACEOF
+
+ 		#define ac_bluegene_loaded so we don't load another bluegene conf
+		ac_bluegene_loaded=yes
+  	fi
+
+
+
+
+	# test for bluegene emulation mode
+   	# Check whether --enable-bgp-emulation was given.
+if test "${enable_bgp_emulation+set}" = set; then
+  enableval=$enable_bgp_emulation;  case "$enableval" in
+	  yes) bgp_emulation=yes ;;
+	  no)  bgp_emulation=no ;;
+	  *)   { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-bgp-emulation" >&5
+echo "$as_me: error: bad value \"$enableval\" for --enable-bgp-emulation" >&2;}
+   { (exit 1); exit 1; }; }  ;;
+    	esac
+fi
+
+
+	# Skip if already set
+   	if test "x$ac_bluegene_loaded" = "xyes" ; then
+		bg_default_dirs=""
+	elif test "x$bgp_emulation" = "xyes"; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_3D 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BG 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BGP 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_FRONT_END 1
+_ACEOF
+
+    		{ echo "$as_me:$LINENO: Running in BG/P emulation mode" >&5
+echo "$as_me: Running in BG/P emulation mode" >&6;}
+		bg_default_dirs=""
+ 		#define ac_bluegene_loaded so we don't load another bluegene conf
+		ac_bluegene_loaded=yes
+	else
+		bg_default_dirs="/bgsys/drivers/ppcfloor"
+	fi
+
+	libname=bgpbridge
+
+   	for bg_dir in $trydb2dir "" $bg_default_dirs; do
+      	# Skip directories that don't exist
+      		if test ! -z "$bg_dir" -a ! -d "$bg_dir" ; then
+         		continue;
+      		fi
+
+		soloc=$bg_dir/lib64/lib$libname.so
+      		# Search for required BG API libraries in the directory
+      		if test -z "$have_bg_ar" -a -f "$soloc" ; then
+         		have_bgp_ar=yes
+			bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -l$libname"
+        	fi
+
+      		# Search for headers in the directory
+      		if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then
+         		have_bgp_hdr=yes
+         		bg_includes="-I$bg_dir/include"
+      		fi
+   	done
+
+   	if test ! -z "$have_bgp_ar" -a ! -z "$have_bgp_hdr" ; then
+      		# ac_with_readline="no"
+		# Test to make sure the api is good
+                saved_LDFLAGS="$LDFLAGS"
+      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64"
+         	cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+ int rm_set_serial(char *);
+int
+main ()
+{
+ rm_set_serial("");
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_link") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext &&
+       $as_test_x conftest$ac_exeext; then
+  have_bgp_files=yes
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	{ { echo "$as_me:$LINENO: error: There is a problem linking to the BG/P api." >&5
+echo "$as_me: error: There is a problem linking to the BG/P api." >&2;}
+   { (exit 1); exit 1; }; }
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+      conftest$ac_exeext conftest.$ac_ext
+		LDFLAGS="$saved_LDFLAGS"
+   	fi
+
+  	if test ! -z "$have_bgp_files" ; then
+      		BG_INCLUDES="$bg_includes"
+	        CFLAGS="$CFLAGS -m64"
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_3D 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BG 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BGP 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_FRONT_END 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BG_FILES 1
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define BG_BRIDGE_SO "$soloc"
+_ACEOF
+
+
+		{ echo "$as_me:$LINENO: checking for BG serial value" >&5
+echo $ECHO_N "checking for BG serial value... $ECHO_C" >&6; }
+        	bg_serial="BGP"
+
+# Check whether --with-bg-serial was given.
+if test "${with_bg_serial+set}" = set; then
+  withval=$with_bg_serial; bg_serial="$withval"
+fi
+
+     		{ echo "$as_me:$LINENO: result: $bg_serial" >&5
+echo "${ECHO_T}$bg_serial" >&6; }
+
+cat >>confdefs.h <<_ACEOF
+#define BG_SERIAL "$bg_serial"
+_ACEOF
+
+ 		#define ac_bluegene_loaded so we don't load another bluegene conf
+		ac_bluegene_loaded=yes
+   	fi
+
+
+
+ if test "x$ac_bluegene_loaded" = "xyes"; then
+  BLUEGENE_LOADED_TRUE=
+  BLUEGENE_LOADED_FALSE='#'
+else
+  BLUEGENE_LOADED_TRUE='#'
+  BLUEGENE_LOADED_FALSE=
+fi
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5
+echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+  CPP=
+fi
+if test -z "$CPP"; then
+  if test "${ac_cv_prog_CPP+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+      # Double quotes because CPP needs to be expanded
+    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+    do
+      ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+		     Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } >/dev/null && {
+	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       }; then
+  :
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+  # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } >/dev/null && {
+	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       }; then
+  # Broken: success on invalid input.
+continue
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+  break
+fi
+
+    done
+    ac_cv_prog_CPP=$CPP
+
+fi
+  CPP=$ac_cv_prog_CPP
+else
+  ac_cv_prog_CPP=$CPP
+fi
+{ echo "$as_me:$LINENO: result: $CPP" >&5
+echo "${ECHO_T}$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
   # not just through cpp. "Syntax error" is here to catch this case.
   cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
@@ -7128,7 +7524,7 @@ ia64-*-hpux*)
   ;;
 *-*-irix6*)
   # Find out which ABI we are using.
-  echo '#line 7131 "configure"' > conftest.$ac_ext
+  echo '#line 7527 "configure"' > conftest.$ac_ext
   if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
   (eval $ac_compile) 2>&5
   ac_status=$?
@@ -9234,11 +9630,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9237: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9633: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:9241: \$? = $ac_status" >&5
+   echo "$as_me:9637: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -9524,11 +9920,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9527: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9923: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:9531: \$? = $ac_status" >&5
+   echo "$as_me:9927: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -9628,11 +10024,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9631: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:10027: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:9635: \$? = $ac_status" >&5
+   echo "$as_me:10031: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -12005,7 +12401,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 12008 "configure"
+#line 12404 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -12105,7 +12501,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 12108 "configure"
+#line 12504 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -14506,11 +14902,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:14509: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:14905: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:14513: \$? = $ac_status" >&5
+   echo "$as_me:14909: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -14610,11 +15006,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:14613: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:15009: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:14617: \$? = $ac_status" >&5
+   echo "$as_me:15013: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -16208,11 +16604,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:16211: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:16607: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:16215: \$? = $ac_status" >&5
+   echo "$as_me:16611: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -16312,11 +16708,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:16315: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:16711: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:16319: \$? = $ac_status" >&5
+   echo "$as_me:16715: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -18532,11 +18928,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18535: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:18931: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:18539: \$? = $ac_status" >&5
+   echo "$as_me:18935: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -18822,11 +19218,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18825: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:19221: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:18829: \$? = $ac_status" >&5
+   echo "$as_me:19225: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -18926,11 +19322,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18929: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:19325: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:18933: \$? = $ac_status" >&5
+   echo "$as_me:19329: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -22688,6 +23084,7 @@ cat >>confdefs.h <<\_ACEOF
 #define HAVE_NUMA 1
 _ACEOF
 
+    CFLAGS="-DNUMA_VERSION1_COMPATIBILITY $CFLAGS"
   else
     { echo "$as_me:$LINENO: WARNING: Unable to locate NUMA memory affinity functions" >&5
 echo "$as_me: WARNING: Unable to locate NUMA memory affinity functions" >&2;}
@@ -23137,296 +23534,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
 
 fi
 
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_includes_default
-int
-main ()
-{
-
-  /* Are we little or big endian?  From Harbison&Steele.  */
-  union
-  {
-    long int l;
-    char c[sizeof (long int)];
-  } u;
-  u.l = 1;
-  return u.c[sizeof (long int) - 1] == 1;
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest$ac_exeext
-if { (ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
-  { (case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  ac_cv_c_bigendian=no
-else
-  echo "$as_me: program exited with status $ac_status" >&5
-echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-( exit $ac_status )
-ac_cv_c_bigendian=yes
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
-fi
-
-
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_c_bigendian" >&5
-echo "${ECHO_T}$ac_cv_c_bigendian" >&6; }
-case $ac_cv_c_bigendian in
-  yes)
-
-cat >>confdefs.h <<\_ACEOF
-#define WORDS_BIGENDIAN 1
-_ACEOF
- ;;
-  no)
-     ;;
-  *)
-    { { echo "$as_me:$LINENO: error: unknown endianness
-presetting ac_cv_c_bigendian=no (or yes) will help" >&5
-echo "$as_me: error: unknown endianness
-presetting ac_cv_c_bigendian=no (or yes) will help" >&2;}
-   { (exit 1); exit 1; }; } ;;
-esac
-
-  if test "x$ac_cv_c_bigendian" = "xyes"; then
-
-cat >>confdefs.h <<\_ACEOF
-#define SLURM_BIGENDIAN 1
-_ACEOF
-
-  fi
-
-
-if test $ac_cv_c_compiler_gnu = yes; then
-    { echo "$as_me:$LINENO: checking whether $CC needs -traditional" >&5
-echo $ECHO_N "checking whether $CC needs -traditional... $ECHO_C" >&6; }
-if test "${ac_cv_prog_gcc_traditional+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-    ac_pattern="Autoconf.*'x'"
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <sgtty.h>
-Autoconf TIOCGETP
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "$ac_pattern" >/dev/null 2>&1; then
-  ac_cv_prog_gcc_traditional=yes
-else
-  ac_cv_prog_gcc_traditional=no
-fi
-rm -f conftest*
-
-
-  if test $ac_cv_prog_gcc_traditional = no; then
-    cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <termio.h>
-Autoconf TCGETA
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "$ac_pattern" >/dev/null 2>&1; then
-  ac_cv_prog_gcc_traditional=yes
-fi
-rm -f conftest*
-
-  fi
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_prog_gcc_traditional" >&5
-echo "${ECHO_T}$ac_cv_prog_gcc_traditional" >&6; }
-  if test $ac_cv_prog_gcc_traditional = yes; then
-    CC="$CC -traditional"
-  fi
-fi
-
-
-
-
-for ac_header in stdlib.h
-do
-as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  { echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-else
-  # Is the header compilable?
-{ echo "$as_me:$LINENO: checking $ac_header usability" >&5
-echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; }
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_includes_default
-#include <$ac_header>
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_header_compiler=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_header_compiler=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
-echo "${ECHO_T}$ac_header_compiler" >&6; }
-
-# Is the header present?
-{ echo "$as_me:$LINENO: checking $ac_header presence" >&5
-echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; }
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <$ac_header>
-_ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  ac_header_preproc=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-  ac_header_preproc=no
-fi
-
-rm -f conftest.err conftest.$ac_ext
-{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
-echo "${ECHO_T}$ac_header_preproc" >&6; }
-
-# So?  What about this header?
-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
-  yes:no: )
-    { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
-echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
-echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
-    ac_header_preproc=yes
-    ;;
-  no:yes:* )
-    { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
-echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header:     check for missing prerequisite headers?" >&5
-echo "$as_me: WARNING: $ac_header:     check for missing prerequisite headers?" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
-echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&5
-echo "$as_me: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
-echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
-echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
-
-    ;;
-esac
-{ echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  eval "$as_ac_Header=\$ac_header_preproc"
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-
-fi
-if test `eval echo '${'$as_ac_Header'}'` = yes; then
-  cat >>confdefs.h <<_ACEOF
-#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-{ echo "$as_me:$LINENO: checking for GNU libc compatible malloc" >&5
-echo $ECHO_N "checking for GNU libc compatible malloc... $ECHO_C" >&6; }
-if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test "$cross_compiling" = yes; then
-  ac_cv_func_malloc_0_nonnull=no
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 else
   cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
@@ -23434,16 +23542,20 @@ _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *malloc ();
-#endif
-
+$ac_includes_default
 int
 main ()
 {
-return ! malloc (0);
+
+  /* Are we little or big endian?  From Harbison&Steele.  */
+  union
+  {
+    long int l;
+    char c[sizeof (long int)];
+  } u;
+  u.l = 1;
+  return u.c[sizeof (long int) - 1] == 1;
+
   ;
   return 0;
 }
@@ -23468,178 +23580,137 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
   ac_status=$?
   echo "$as_me:$LINENO: \$? = $ac_status" >&5
   (exit $ac_status); }; }; then
-  ac_cv_func_malloc_0_nonnull=yes
+  ac_cv_c_bigendian=no
 else
   echo "$as_me: program exited with status $ac_status" >&5
 echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
 ( exit $ac_status )
-ac_cv_func_malloc_0_nonnull=no
+ac_cv_c_bigendian=yes
 fi
 rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
 fi
 
 
 fi
-{ echo "$as_me:$LINENO: result: $ac_cv_func_malloc_0_nonnull" >&5
-echo "${ECHO_T}$ac_cv_func_malloc_0_nonnull" >&6; }
-if test $ac_cv_func_malloc_0_nonnull = yes; then
 
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_MALLOC 1
-_ACEOF
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_c_bigendian" >&5
+echo "${ECHO_T}$ac_cv_c_bigendian" >&6; }
+case $ac_cv_c_bigendian in
+  yes)
 
-else
-  cat >>confdefs.h <<\_ACEOF
-#define HAVE_MALLOC 0
+cat >>confdefs.h <<\_ACEOF
+#define WORDS_BIGENDIAN 1
 _ACEOF
-
-   case " $LIBOBJS " in
-  *" malloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS malloc.$ac_objext"
  ;;
+  no)
+     ;;
+  *)
+    { { echo "$as_me:$LINENO: error: unknown endianness
+presetting ac_cv_c_bigendian=no (or yes) will help" >&5
+echo "$as_me: error: unknown endianness
+presetting ac_cv_c_bigendian=no (or yes) will help" >&2;}
+   { (exit 1); exit 1; }; } ;;
 esac
 
+  if test "x$ac_cv_c_bigendian" = "xyes"; then
 
 cat >>confdefs.h <<\_ACEOF
-#define malloc rpl_malloc
+#define SLURM_BIGENDIAN 1
 _ACEOF
 
-fi
-
+  fi
 
 
-{ echo "$as_me:$LINENO: checking whether strerror_r is declared" >&5
-echo $ECHO_N "checking whether strerror_r is declared... $ECHO_C" >&6; }
-if test "${ac_cv_have_decl_strerror_r+set}" = set; then
+if test $ac_cv_c_compiler_gnu = yes; then
+    { echo "$as_me:$LINENO: checking whether $CC needs -traditional" >&5
+echo $ECHO_N "checking whether $CC needs -traditional... $ECHO_C" >&6; }
+if test "${ac_cv_prog_gcc_traditional+set}" = set; then
   echo $ECHO_N "(cached) $ECHO_C" >&6
 else
+    ac_pattern="Autoconf.*'x'"
   cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-$ac_includes_default
-int
-main ()
-{
-#ifndef strerror_r
-  (void) strerror_r;
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_have_decl_strerror_r=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_cv_have_decl_strerror_r=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_have_decl_strerror_r" >&5
-echo "${ECHO_T}$ac_cv_have_decl_strerror_r" >&6; }
-if test $ac_cv_have_decl_strerror_r = yes; then
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_STRERROR_R 1
+#include <sgtty.h>
+Autoconf TIOCGETP
 _ACEOF
-
-
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "$ac_pattern" >/dev/null 2>&1; then
+  ac_cv_prog_gcc_traditional=yes
 else
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_STRERROR_R 0
-_ACEOF
-
-
+  ac_cv_prog_gcc_traditional=no
 fi
+rm -f conftest*
 
 
-
-for ac_func in strerror_r
-do
-as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
-{ echo "$as_me:$LINENO: checking for $ac_func" >&5
-echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6; }
-if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
+  if test $ac_cv_prog_gcc_traditional = no; then
+    cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func.
-   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
-#define $ac_func innocuous_$ac_func
+#include <termio.h>
+Autoconf TCGETA
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "$ac_pattern" >/dev/null 2>&1; then
+  ac_cv_prog_gcc_traditional=yes
+fi
+rm -f conftest*
 
-/* System header to define __stub macros and hopefully few prototypes,
-    which can conflict with char $ac_func (); below.
-    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-    <limits.h> exists even on freestanding compilers.  */
+  fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_prog_gcc_traditional" >&5
+echo "${ECHO_T}$ac_cv_prog_gcc_traditional" >&6; }
+  if test $ac_cv_prog_gcc_traditional = yes; then
+    CC="$CC -traditional"
+  fi
+fi
 
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
 
-#undef $ac_func
 
-/* Override any GCC internal prototype to avoid an error.
-   Use char because int might match the return type of a GCC
-   builtin and then its argument prototype would still apply.  */
-#ifdef __cplusplus
-extern "C"
-#endif
-char $ac_func ();
-/* The GNU C library defines this for functions which it implements
-    to always fail with ENOSYS.  Some functions are actually named
-    something starting with __ and the normal name is an alias.  */
-#if defined __stub_$ac_func || defined __stub___$ac_func
-choke me
-#endif
 
-int
-main ()
-{
-return $ac_func ();
-  ;
-  return 0;
-}
+for ac_header in stdlib.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+  { echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+	       { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+else
+  # Is the header compilable?
+{ echo "$as_me:$LINENO: checking $ac_header usability" >&5
+echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
 _ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (ac_try="$ac_link"
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
 case "(($ac_try" in
   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
   *) ac_try_echo=$ac_try;;
 esac
 eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>conftest.er1
+  (eval "$ac_compile") 2>conftest.er1
   ac_status=$?
   grep -v '^ *+' conftest.er1 >conftest.err
   rm -f conftest.er1
@@ -23648,93 +23719,111 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
   (exit $ac_status); } && {
 	 test -z "$ac_c_werror_flag" ||
 	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext &&
-       $as_test_x conftest$ac_exeext; then
-  eval "$as_ac_var=yes"
+       } && test -s conftest.$ac_objext; then
+  ac_header_compiler=yes
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-	eval "$as_ac_var=no"
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
-      conftest$ac_exeext conftest.$ac_ext
-fi
-ac_res=`eval echo '${'$as_ac_var'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-if test `eval echo '${'$as_ac_var'}'` = yes; then
-  cat >>confdefs.h <<_ACEOF
-#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
-_ACEOF
-
+	ac_header_compiler=no
 fi
-done
 
-{ echo "$as_me:$LINENO: checking whether strerror_r returns char *" >&5
-echo $ECHO_N "checking whether strerror_r returns char *... $ECHO_C" >&6; }
-if test "${ac_cv_func_strerror_r_char_p+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6; }
 
-    ac_cv_func_strerror_r_char_p=no
-    if test $ac_cv_have_decl_strerror_r = yes; then
-      cat >conftest.$ac_ext <<_ACEOF
+# Is the header present?
+{ echo "$as_me:$LINENO: checking $ac_header presence" >&5
+echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-$ac_includes_default
-int
-main ()
-{
-
-	  char buf[100];
-	  char x = *strerror_r (0, buf, sizeof buf);
-	  char *p = strerror_r (0, buf, sizeof buf);
-	  return !p || x;
-
-  ;
-  return 0;
-}
+#include <$ac_header>
 _ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
+if { (ac_try="$ac_cpp conftest.$ac_ext"
 case "(($ac_try" in
   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
   *) ac_try_echo=$ac_try;;
 esac
 eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
   ac_status=$?
   grep -v '^ *+' conftest.er1 >conftest.err
   rm -f conftest.er1
   cat conftest.err >&5
   echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
+  (exit $ac_status); } >/dev/null && {
+	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
 	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_func_strerror_r_char_p=yes
+       }; then
+  ac_header_preproc=yes
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
+  ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6; }
+
+# So?  What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
+  yes:no: )
+    { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+    ac_header_preproc=yes
+    ;;
+  no:yes:* )
+    { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header:     check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: $ac_header:     check for missing prerequisite headers?" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&5
+echo "$as_me: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
 
+    ;;
+esac
+{ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  eval "$as_ac_Header=\$ac_header_preproc"
 fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+	       { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
 
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-    else
-      # strerror_r is not declared.  Choose between
-      # systems that have relatively inaccessible declarations for the
-      # function.  BeOS and DEC UNIX 4.0 fall in this category, but the
-      # former has a strerror_r that returns char*, while the latter
-      # has a strerror_r that returns `int'.
-      # This test should segfault on the DEC system.
-      if test "$cross_compiling" = yes; then
-  :
+fi
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+  cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+{ echo "$as_me:$LINENO: checking for GNU libc compatible malloc" >&5
+echo $ECHO_N "checking for GNU libc compatible malloc... $ECHO_C" >&6; }
+if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  if test "$cross_compiling" = yes; then
+  ac_cv_func_malloc_0_nonnull=no
 else
   cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
@@ -23742,14 +23831,16 @@ _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-$ac_includes_default
-	extern char *strerror_r ();
+#if defined STDC_HEADERS || defined HAVE_STDLIB_H
+# include <stdlib.h>
+#else
+char *malloc ();
+#endif
+
 int
 main ()
 {
-char buf[100];
-	  char x = *strerror_r (0, buf, sizeof buf);
-	  return ! isalpha (x);
+return ! malloc (0);
   ;
   return 0;
 }
@@ -23774,57 +23865,118 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
   ac_status=$?
   echo "$as_me:$LINENO: \$? = $ac_status" >&5
   (exit $ac_status); }; }; then
-  ac_cv_func_strerror_r_char_p=yes
+  ac_cv_func_malloc_0_nonnull=yes
 else
   echo "$as_me: program exited with status $ac_status" >&5
 echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
+( exit $ac_status )
+ac_cv_func_malloc_0_nonnull=no
 fi
 rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
 fi
 
 
-    fi
-
 fi
-{ echo "$as_me:$LINENO: result: $ac_cv_func_strerror_r_char_p" >&5
-echo "${ECHO_T}$ac_cv_func_strerror_r_char_p" >&6; }
-if test $ac_cv_func_strerror_r_char_p = yes; then
+{ echo "$as_me:$LINENO: result: $ac_cv_func_malloc_0_nonnull" >&5
+echo "${ECHO_T}$ac_cv_func_malloc_0_nonnull" >&6; }
+if test $ac_cv_func_malloc_0_nonnull = yes; then
 
 cat >>confdefs.h <<\_ACEOF
-#define STRERROR_R_CHAR_P 1
+#define HAVE_MALLOC 1
 _ACEOF
 
-fi
-
-
-
+else
+  cat >>confdefs.h <<\_ACEOF
+#define HAVE_MALLOC 0
+_ACEOF
 
+   case " $LIBOBJS " in
+  *" malloc.$ac_objext "* ) ;;
+  *) LIBOBJS="$LIBOBJS malloc.$ac_objext"
+ ;;
+esac
 
 
+cat >>confdefs.h <<\_ACEOF
+#define malloc rpl_malloc
+_ACEOF
 
+fi
 
 
 
+{ echo "$as_me:$LINENO: checking whether strerror_r is declared" >&5
+echo $ECHO_N "checking whether strerror_r is declared... $ECHO_C" >&6; }
+if test "${ac_cv_have_decl_strerror_r+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+$ac_includes_default
+int
+main ()
+{
+#ifndef strerror_r
+  (void) strerror_r;
+#endif
 
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_compile") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then
+  ac_cv_have_decl_strerror_r=yes
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
 
+	ac_cv_have_decl_strerror_r=no
+fi
 
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_have_decl_strerror_r" >&5
+echo "${ECHO_T}$ac_cv_have_decl_strerror_r" >&6; }
+if test $ac_cv_have_decl_strerror_r = yes; then
 
-for ac_func in \
-   fdatasync \
-   hstrerror \
-   strerror  \
-   mtrace    \
-   strndup   \
-   strlcpy   \
-   strsignal \
-   inet_aton \
-   inet_ntop \
-   inet_pton \
-   setproctitle \
-   sysctlbyname \
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRERROR_R 1
+_ACEOF
 
+
+else
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRERROR_R 0
+_ACEOF
+
+
+fi
+
+
+
+for ac_func in strerror_r
 do
 as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
 { echo "$as_me:$LINENO: checking for $ac_func" >&5
@@ -23917,13 +24069,15 @@ _ACEOF
 fi
 done
 
-
-{ echo "$as_me:$LINENO: checking whether hstrerror is declared" >&5
-echo $ECHO_N "checking whether hstrerror is declared... $ECHO_C" >&6; }
-if test "${ac_cv_have_decl_hstrerror+set}" = set; then
+{ echo "$as_me:$LINENO: checking whether strerror_r returns char *" >&5
+echo $ECHO_N "checking whether strerror_r returns char *... $ECHO_C" >&6; }
+if test "${ac_cv_func_strerror_r_char_p+set}" = set; then
   echo $ECHO_N "(cached) $ECHO_C" >&6
 else
-  cat >conftest.$ac_ext <<_ACEOF
+
+    ac_cv_func_strerror_r_char_p=no
+    if test $ac_cv_have_decl_strerror_r = yes; then
+      cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
@@ -23933,9 +24087,11 @@ $ac_includes_default
 int
 main ()
 {
-#ifndef hstrerror
-  (void) hstrerror;
-#endif
+
+	  char buf[100];
+	  char x = *strerror_r (0, buf, sizeof buf);
+	  char *p = strerror_r (0, buf, sizeof buf);
+	  return !p || x;
 
   ;
   return 0;
@@ -23958,36 +24114,24 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
 	 test -z "$ac_c_werror_flag" ||
 	 test ! -s conftest.err
        } && test -s conftest.$ac_objext; then
-  ac_cv_have_decl_hstrerror=yes
+  ac_cv_func_strerror_r_char_p=yes
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-	ac_cv_have_decl_hstrerror=no
-fi
 
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 fi
-{ echo "$as_me:$LINENO: result: $ac_cv_have_decl_hstrerror" >&5
-echo "${ECHO_T}$ac_cv_have_decl_hstrerror" >&6; }
-if test $ac_cv_have_decl_hstrerror = yes; then
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_HSTRERROR 1
-_ACEOF
-
-
-else
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_HSTRERROR 0
-_ACEOF
 
-
-fi
-{ echo "$as_me:$LINENO: checking whether strsignal is declared" >&5
-echo $ECHO_N "checking whether strsignal is declared... $ECHO_C" >&6; }
-if test "${ac_cv_have_decl_strsignal+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    else
+      # strerror_r is not declared.  Choose between
+      # systems that have relatively inaccessible declarations for the
+      # function.  BeOS and DEC UNIX 4.0 fall in this category, but the
+      # former has a strerror_r that returns char*, while the latter
+      # has a strerror_r that returns `int'.
+      # This test should segfault on the DEC system.
+      if test "$cross_compiling" = yes; then
+  :
 else
   cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
@@ -23996,131 +24140,88 @@ cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
 $ac_includes_default
+	extern char *strerror_r ();
 int
 main ()
 {
-#ifndef strsignal
-  (void) strsignal;
-#endif
-
+char buf[100];
+	  char x = *strerror_r (0, buf, sizeof buf);
+	  return ! isalpha (x);
   ;
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
 case "(($ac_try" in
   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
   *) ac_try_echo=$ac_try;;
 esac
 eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
+  (eval "$ac_link") 2>&5
   ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
   echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_have_decl_strsignal=yes
+  (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+  { (case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); }; }; then
+  ac_cv_func_strerror_r_char_p=yes
 else
-  echo "$as_me: failed program was:" >&5
+  echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-	ac_cv_have_decl_strsignal=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
 fi
 
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+    fi
+
 fi
-{ echo "$as_me:$LINENO: result: $ac_cv_have_decl_strsignal" >&5
-echo "${ECHO_T}$ac_cv_have_decl_strsignal" >&6; }
-if test $ac_cv_have_decl_strsignal = yes; then
+{ echo "$as_me:$LINENO: result: $ac_cv_func_strerror_r_char_p" >&5
+echo "${ECHO_T}$ac_cv_func_strerror_r_char_p" >&6; }
+if test $ac_cv_func_strerror_r_char_p = yes; then
 
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_STRSIGNAL 1
+cat >>confdefs.h <<\_ACEOF
+#define STRERROR_R_CHAR_P 1
 _ACEOF
 
+fi
 
-else
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_STRSIGNAL 0
-_ACEOF
 
 
-fi
-{ echo "$as_me:$LINENO: checking whether sys_siglist is declared" >&5
-echo $ECHO_N "checking whether sys_siglist is declared... $ECHO_C" >&6; }
-if test "${ac_cv_have_decl_sys_siglist+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_includes_default
-int
-main ()
-{
-#ifndef sys_siglist
-  (void) sys_siglist;
-#endif
 
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_have_decl_sys_siglist=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
 
-	ac_cv_have_decl_sys_siglist=no
-fi
 
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_have_decl_sys_siglist" >&5
-echo "${ECHO_T}$ac_cv_have_decl_sys_siglist" >&6; }
-if test $ac_cv_have_decl_sys_siglist = yes; then
 
-cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_SYS_SIGLIST 1
-_ACEOF
 
 
-else
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_DECL_SYS_SIGLIST 0
-_ACEOF
 
 
-fi
 
 
 
+for ac_func in \
+   fdatasync \
+   hstrerror \
+   strerror  \
+   mtrace    \
+   strndup   \
+   strlcpy   \
+   strsignal \
+   inet_aton \
+   inet_ntop \
+   inet_pton \
+   setproctitle \
+   sysctlbyname \
 
-for ac_func in unsetenv
 do
 as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
 { echo "$as_me:$LINENO: checking for $ac_func" >&5
@@ -24209,74 +24310,42 @@ if test `eval echo '${'$as_ac_var'}'` = yes; then
   cat >>confdefs.h <<_ACEOF
 #define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
 _ACEOF
- have_unsetenv=yes
-fi
-done
 
- if test "x$have_unsetenv" = "xyes"; then
-  HAVE_UNSETENV_TRUE=
-  HAVE_UNSETENV_FALSE='#'
-else
-  HAVE_UNSETENV_TRUE='#'
-  HAVE_UNSETENV_FALSE=
 fi
+done
 
 
-
-
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-acx_pthread_ok=no
-
-# We used to check for pthread.h first, but this fails if pthread.h
-# requires special compiler flags (e.g. on True64 or Sequent).
-# It gets checked for in the link test anyway.
-
-# First of all, check if the user has set any of the PTHREAD_LIBS,
-# etcetera environment variables, and if threads linking works using
-# them:
-if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
-        save_CFLAGS="$CFLAGS"
-        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
-        save_LIBS="$LIBS"
-        LIBS="$PTHREAD_LIBS $LIBS"
-        { echo "$as_me:$LINENO: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5
-echo $ECHO_N "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... $ECHO_C" >&6; }
-        cat >conftest.$ac_ext <<_ACEOF
+{ echo "$as_me:$LINENO: checking whether hstrerror is declared" >&5
+echo $ECHO_N "checking whether hstrerror is declared... $ECHO_C" >&6; }
+if test "${ac_cv_have_decl_hstrerror+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-
-/* Override any GCC internal prototype to avoid an error.
-   Use char because int might match the return type of a GCC
-   builtin and then its argument prototype would still apply.  */
-#ifdef __cplusplus
-extern "C"
-#endif
-char pthread_join ();
+$ac_includes_default
 int
 main ()
 {
-return pthread_join ();
+#ifndef hstrerror
+  (void) hstrerror;
+#endif
+
   ;
   return 0;
 }
-_ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (ac_try="$ac_link"
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
 case "(($ac_try" in
   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
   *) ac_try_echo=$ac_try;;
 esac
 eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>conftest.er1
+  (eval "$ac_compile") 2>conftest.er1
   ac_status=$?
   grep -v '^ *+' conftest.er1 >conftest.err
   rm -f conftest.er1
@@ -24285,133 +24354,64 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
   (exit $ac_status); } && {
 	 test -z "$ac_c_werror_flag" ||
 	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext &&
-       $as_test_x conftest$ac_exeext; then
-  acx_pthread_ok=yes
+       } && test -s conftest.$ac_objext; then
+  ac_cv_have_decl_hstrerror=yes
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-
+	ac_cv_have_decl_hstrerror=no
 fi
 
-rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
-      conftest$ac_exeext conftest.$ac_ext
-        { echo "$as_me:$LINENO: result: $acx_pthread_ok" >&5
-echo "${ECHO_T}$acx_pthread_ok" >&6; }
-        if test x"$acx_pthread_ok" = xno; then
-                PTHREAD_LIBS=""
-                PTHREAD_CFLAGS=""
-        fi
-        LIBS="$save_LIBS"
-        CFLAGS="$save_CFLAGS"
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 fi
+{ echo "$as_me:$LINENO: result: $ac_cv_have_decl_hstrerror" >&5
+echo "${ECHO_T}$ac_cv_have_decl_hstrerror" >&6; }
+if test $ac_cv_have_decl_hstrerror = yes; then
 
-# We must check for the threads library under a number of different
-# names; the ordering is very important because some systems
-# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
-# libraries is broken (non-POSIX).
-
-# Create a list of thread flags to try.  Items starting with a "-" are
-# C compiler flags, and other items are library names, except for "none"
-# which indicates that we try without any flags at all.
-
-acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt"
-
-# The ordering *is* (sometimes) important.  Some notes on the
-# individual items follow:
-
-# pthreads: AIX (must check this before -lpthread)
-# none: in case threads are in libc; should be tried before -Kthread and
-#       other compiler flags to prevent continual compiler warnings
-# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
-# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
-# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
-# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads)
-# -pthreads: Solaris/gcc
-# -mthreads: Mingw32/gcc, Lynx/gcc
-# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
-#      doesn't hurt to check since this sometimes defines pthreads too;
-#      also defines -D_REENTRANT)
-# pthread: Linux, etcetera
-# --thread-safe: KAI C++
-
-case "${host_cpu}-${host_os}" in
-        *solaris*)
-
-        # On Solaris (at least, for some versions), libc contains stubbed
-        # (non-functional) versions of the pthreads routines, so link-based
-        # tests will erroneously succeed.  (We need to link with -pthread or
-        # -lpthread.)  (The stubs are missing pthread_cleanup_push, or rather
-        # a function called by this macro, so we could check for that, but
-        # who knows whether they'll stub that too in a future libc.)  So,
-        # we'll just look for -pthreads and -lpthread first:
-
-        acx_pthread_flags="-pthread -pthreads pthread -mt $acx_pthread_flags"
-        ;;
-esac
-
-if test x"$acx_pthread_ok" = xno; then
-for flag in $acx_pthread_flags; do
-
-        case $flag in
-                none)
-                { echo "$as_me:$LINENO: checking whether pthreads work without any flags" >&5
-echo $ECHO_N "checking whether pthreads work without any flags... $ECHO_C" >&6; }
-                ;;
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_HSTRERROR 1
+_ACEOF
 
-                -*)
-                { echo "$as_me:$LINENO: checking whether pthreads work with $flag" >&5
-echo $ECHO_N "checking whether pthreads work with $flag... $ECHO_C" >&6; }
-                PTHREAD_CFLAGS="$flag"
-                ;;
 
-                *)
-                { echo "$as_me:$LINENO: checking for the pthreads library -l$flag" >&5
-echo $ECHO_N "checking for the pthreads library -l$flag... $ECHO_C" >&6; }
-                PTHREAD_LIBS="-l$flag"
-                ;;
-        esac
+else
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_HSTRERROR 0
+_ACEOF
 
-        save_LIBS="$LIBS"
-        save_CFLAGS="$CFLAGS"
-        LIBS="$PTHREAD_LIBS $LIBS"
-        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
 
-        # Check for various functions.  We must include pthread.h,
-        # since some functions may be macros.  (On the Sequent, we
-        # need a special flag -Kthread to make this header compile.)
-        # We check for pthread_join because it is in -lpthread on IRIX
-        # while pthread_create is in libc.  We check for pthread_attr_init
-        # due to DEC craziness with -lpthreads.  We check for
-        # pthread_cleanup_push because it is one of the few pthread
-        # functions on Solaris that doesn't have a non-functional libc stub.
-        # We try pthread_create on general principles.
-        cat >conftest.$ac_ext <<_ACEOF
+fi
+{ echo "$as_me:$LINENO: checking whether strsignal is declared" >&5
+echo $ECHO_N "checking whether strsignal is declared... $ECHO_C" >&6; }
+if test "${ac_cv_have_decl_strsignal+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-#include <pthread.h>
+$ac_includes_default
 int
 main ()
 {
-pthread_t th; pthread_join(th, 0);
-                     pthread_attr_init(0); pthread_cleanup_push(0, 0);
-                     pthread_create(0,0,0,0); pthread_cleanup_pop(0);
+#ifndef strsignal
+  (void) strsignal;
+#endif
+
   ;
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (ac_try="$ac_link"
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
 case "(($ac_try" in
   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
   *) ac_try_echo=$ac_try;;
 esac
 eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>conftest.er1
+  (eval "$ac_compile") 2>conftest.er1
   ac_status=$?
   grep -v '^ *+' conftest.er1 >conftest.err
   rm -f conftest.er1
@@ -24420,67 +24420,64 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
   (exit $ac_status); } && {
 	 test -z "$ac_c_werror_flag" ||
 	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext &&
-       $as_test_x conftest$ac_exeext; then
-  acx_pthread_ok=yes
+       } && test -s conftest.$ac_objext; then
+  ac_cv_have_decl_strsignal=yes
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-
+	ac_cv_have_decl_strsignal=no
 fi
 
-rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
-      conftest$ac_exeext conftest.$ac_ext
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_have_decl_strsignal" >&5
+echo "${ECHO_T}$ac_cv_have_decl_strsignal" >&6; }
+if test $ac_cv_have_decl_strsignal = yes; then
 
-        LIBS="$save_LIBS"
-        CFLAGS="$save_CFLAGS"
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRSIGNAL 1
+_ACEOF
 
-        { echo "$as_me:$LINENO: result: $acx_pthread_ok" >&5
-echo "${ECHO_T}$acx_pthread_ok" >&6; }
-        if test "x$acx_pthread_ok" = xyes; then
-                break;
-        fi
 
-        PTHREAD_LIBS=""
-        PTHREAD_CFLAGS=""
-done
-fi
+else
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRSIGNAL 0
+_ACEOF
 
-# Various other checks:
-if test "x$acx_pthread_ok" = xyes; then
-        save_LIBS="$LIBS"
-        LIBS="$PTHREAD_LIBS $LIBS"
-        save_CFLAGS="$CFLAGS"
-        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
 
-        # Detect AIX lossage: threads are created detached by default
-        # and the JOINABLE attribute has a nonstandard name (UNDETACHED).
-        { echo "$as_me:$LINENO: checking for joinable pthread attribute" >&5
-echo $ECHO_N "checking for joinable pthread attribute... $ECHO_C" >&6; }
-        cat >conftest.$ac_ext <<_ACEOF
+fi
+{ echo "$as_me:$LINENO: checking whether sys_siglist is declared" >&5
+echo $ECHO_N "checking whether sys_siglist is declared... $ECHO_C" >&6; }
+if test "${ac_cv_have_decl_sys_siglist+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
-#include <pthread.h>
+$ac_includes_default
 int
 main ()
 {
-int attr=PTHREAD_CREATE_JOINABLE;
+#ifndef sys_siglist
+  (void) sys_siglist;
+#endif
+
   ;
   return 0;
 }
 _ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (ac_try="$ac_link"
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
 case "(($ac_try" in
   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
   *) ac_try_echo=$ac_try;;
 esac
 eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>conftest.er1
+  (eval "$ac_compile") 2>conftest.er1
   ac_status=$?
   grep -v '^ *+' conftest.er1 >conftest.err
   rm -f conftest.er1
@@ -24489,30 +24486,86 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
   (exit $ac_status); } && {
 	 test -z "$ac_c_werror_flag" ||
 	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext &&
-       $as_test_x conftest$ac_exeext; then
-  ok=PTHREAD_CREATE_JOINABLE
+       } && test -s conftest.$ac_objext; then
+  ac_cv_have_decl_sys_siglist=yes
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-	ok=unknown
-fi
+	ac_cv_have_decl_sys_siglist=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_have_decl_sys_siglist" >&5
+echo "${ECHO_T}$ac_cv_have_decl_sys_siglist" >&6; }
+if test $ac_cv_have_decl_sys_siglist = yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_SYS_SIGLIST 1
+_ACEOF
+
+
+else
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_SYS_SIGLIST 0
+_ACEOF
+
+
+fi
+
+
+
+
+for ac_func in unsetenv
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+{ echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6; }
+if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func.
+   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
+#define $ac_func innocuous_$ac_func
+
+/* System header to define __stub macros and hopefully few prototypes,
+    which can conflict with char $ac_func (); below.
+    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+    <limits.h> exists even on freestanding compilers.  */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $ac_func
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+    to always fail with ENOSYS.  Some functions are actually named
+    something starting with __ and the normal name is an alias.  */
+#if defined __stub_$ac_func || defined __stub___$ac_func
+choke me
+#endif
 
-rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
-      conftest$ac_exeext conftest.$ac_ext
-        if test x"$ok" = xunknown; then
-                cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <pthread.h>
 int
 main ()
 {
-int attr=PTHREAD_CREATE_UNDETACHED;
+return $ac_func ();
   ;
   return 0;
 }
@@ -24535,236 +24588,80 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
 	 test ! -s conftest.err
        } && test -s conftest$ac_exeext &&
        $as_test_x conftest$ac_exeext; then
-  ok=PTHREAD_CREATE_UNDETACHED
+  eval "$as_ac_var=yes"
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-	ok=unknown
+	eval "$as_ac_var=no"
 fi
 
 rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
       conftest$ac_exeext conftest.$ac_ext
-        fi
-        if test x"$ok" != xPTHREAD_CREATE_JOINABLE; then
-
-cat >>confdefs.h <<\_ACEOF
-#define PTHREAD_CREATE_JOINABLE $ok
-_ACEOF
-
-        fi
-        { echo "$as_me:$LINENO: result: ${ok}" >&5
-echo "${ECHO_T}${ok}" >&6; }
-        if test x"$ok" = xunknown; then
-                { echo "$as_me:$LINENO: WARNING: we do not know how to create joinable pthreads" >&5
-echo "$as_me: WARNING: we do not know how to create joinable pthreads" >&2;}
-        fi
-
-        { echo "$as_me:$LINENO: checking if more special flags are required for pthreads" >&5
-echo $ECHO_N "checking if more special flags are required for pthreads... $ECHO_C" >&6; }
-        flag=no
-        case "${host_cpu}-${host_os}" in
-                *-aix* | *-freebsd*)     flag="-D_THREAD_SAFE";;
-                *solaris* | alpha*-osf*) flag="-D_REENTRANT";;
-        esac
-        { echo "$as_me:$LINENO: result: ${flag}" >&5
-echo "${ECHO_T}${flag}" >&6; }
-        if test "x$flag" != xno; then
-                PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
-        fi
-
-        LIBS="$save_LIBS"
-        CFLAGS="$save_CFLAGS"
-
-        # More AIX lossage: must compile with cc_r
-        # Extract the first word of "cc_r", so it can be a program name with args.
-set dummy cc_r; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_prog_PTHREAD_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test -n "$PTHREAD_CC"; then
-  ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_PTHREAD_CC="cc_r"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-done
-IFS=$as_save_IFS
-
-  test -z "$ac_cv_prog_PTHREAD_CC" && ac_cv_prog_PTHREAD_CC="${CC}"
-fi
 fi
-PTHREAD_CC=$ac_cv_prog_PTHREAD_CC
-if test -n "$PTHREAD_CC"; then
-  { echo "$as_me:$LINENO: result: $PTHREAD_CC" >&5
-echo "${ECHO_T}$PTHREAD_CC" >&6; }
-else
-  { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
+ac_res=`eval echo '${'$as_ac_var'}'`
+	       { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+  cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+ have_unsetenv=yes
 fi
+done
 
-
+ if test "x$have_unsetenv" = "xyes"; then
+  HAVE_UNSETENV_TRUE=
+  HAVE_UNSETENV_FALSE='#'
 else
-        PTHREAD_CC="$CC"
+  HAVE_UNSETENV_TRUE='#'
+  HAVE_UNSETENV_FALSE=
 fi
 
 
 
 
 
-# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
-if test x"$acx_pthread_ok" = xyes; then
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_PTHREAD 1
-_ACEOF
-
-        :
-else
-        acx_pthread_ok=no
-        { { echo "$as_me:$LINENO: error: Error: Cannot figure out how to use pthreads!" >&5
-echo "$as_me: error: Error: Cannot figure out how to use pthreads!" >&2;}
-   { (exit 1); exit 1; }; }
-fi
 ac_ext=c
 ac_cpp='$CPP $CPPFLAGS'
 ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
 ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
 ac_compiler_gnu=$ac_cv_c_compiler_gnu
 
+acx_pthread_ok=no
 
+# We used to check for pthread.h first, but this fails if pthread.h
+# requires special compiler flags (e.g. on True64 or Sequent).
+# It gets checked for in the link test anyway.
 
-# Always define WITH_PTHREADS if we make it this far
-
-cat >>confdefs.h <<\_ACEOF
-#define WITH_PTHREADS 1
-_ACEOF
-
-LDFLAGS="$LDFLAGS "
-CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
-LIBS="$PTHREAD_LIBS $LIBS"
-
-
-	ac_bluegene_loaded=no
-
-
-# Check whether --with-db2-dir was given.
-if test "${with_db2_dir+set}" = set; then
-  withval=$with_db2_dir;  trydb2dir=$withval
-fi
-
-
-	# test for bluegene emulation mode
-
-  	# Check whether --enable-bluegene-emulation was given.
-if test "${enable_bluegene_emulation+set}" = set; then
-  enableval=$enable_bluegene_emulation;  case "$enableval" in
-	  yes) bluegene_emulation=yes ;;
-	  no)  bluegene_emulation=no ;;
-	  *)   { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-bluegene-emulation" >&5
-echo "$as_me: error: bad value \"$enableval\" for --enable-bluegene-emulation" >&2;}
-   { (exit 1); exit 1; }; }  ;;
-    	esac
-fi
-
-
-  	# Check whether --enable-bgl-emulation was given.
-if test "${enable_bgl_emulation+set}" = set; then
-  enableval=$enable_bgl_emulation;  case "$enableval" in
-	  yes) bgl_emulation=yes ;;
-	  no)  bgl_emulation=no ;;
-	  *)   { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-bgl-emulation" >&5
-echo "$as_me: error: bad value \"$enableval\" for --enable-bgl-emulation" >&2;}
-   { (exit 1); exit 1; }; }  ;;
-    	esac
-fi
-
-
-	if test "x$bluegene_emulation" = "xyes" -o "x$bgl_emulation" = "xyes"; then
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_3D 1
-_ACEOF
-
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BG 1
-_ACEOF
-
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BGL 1
-_ACEOF
-
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_FRONT_END 1
-_ACEOF
-
-    		{ echo "$as_me:$LINENO: Running in BG/L emulation mode" >&5
-echo "$as_me: Running in BG/L emulation mode" >&6;}
-		bg_default_dirs=""
- 		#define ac_bluegene_loaded so we don't load another bluegene conf
-		ac_bluegene_loaded=yes
-	else
-  	   	bg_default_dirs="/bgl/BlueLight/ppcfloor/bglsys /opt/IBM/db2/V8.1 /u/bgdb2cli/sqllib /home/bgdb2cli/sqllib"
-	fi
-
-   	for bg_dir in $trydb2dir "" $bg_default_dirs; do
-      	# Skip directories that don't exist
-      		if test ! -z "$bg_dir" -a ! -d "$bg_dir" ; then
-         		continue;
-      		fi
-
-      		# Search for required BG API libraries in the directory
-      		if test -z "$have_bg_ar" -a -f "$bg_dir/lib64/libbglbridge.so" ; then
-         		have_bg_ar=yes
-			bg_bridge_so="$bg_dir/lib64/libbglbridge.so"
-       	 		bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -lbglbridge -lbgldb -ltableapi -lbglmachine -lexpat -lsaymessage"
-        	fi
-
-      		# Search for required DB2 library in the directory
-      		if test -z "$have_db2" -a -f "$bg_dir/lib64/libdb2.so" ; then
-         		have_db2=yes
-	 	 	bg_db2_so="$bg_dir/lib64/libdb2.so"
-       	 		bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -ldb2"
-       		fi
-
-      		# Search for headers in the directory
-      		if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then
-         		have_bg_hdr=yes
-         		bg_includes="-I$bg_dir/include"
-      		fi
-   	done
-
-   	if test ! -z "$have_bg_ar" -a ! -z "$have_bg_hdr" -a ! -z "$have_db2" ; then
-      		# ac_with_readline="no"
-		# Test to make sure the api is good
-                have_bg_files=yes
-      		saved_LDFLAGS="$LDFLAGS"
-      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags"
-         	cat >conftest.$ac_ext <<_ACEOF
+# First of all, check if the user has set any of the PTHREAD_LIBS,
+# etcetera environment variables, and if threads linking works using
+# them:
+if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
+        save_CFLAGS="$CFLAGS"
+        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+        save_LIBS="$LIBS"
+        LIBS="$PTHREAD_LIBS $LIBS"
+        { echo "$as_me:$LINENO: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5
+echo $ECHO_N "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... $ECHO_C" >&6; }
+        cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
- int rm_set_serial(char *);
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char pthread_join ();
 int
 main ()
 {
- rm_set_serial("");
+return pthread_join ();
   ;
   return 0;
 }
@@ -24787,166 +24684,188 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
 	 test ! -s conftest.err
        } && test -s conftest$ac_exeext &&
        $as_test_x conftest$ac_exeext; then
-  have_bg_files=yes
+  acx_pthread_ok=yes
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-	{ { echo "$as_me:$LINENO: error: There is a problem linking to the BG/L api." >&5
-echo "$as_me: error: There is a problem linking to the BG/L api." >&2;}
-   { (exit 1); exit 1; }; }
+
 fi
 
 rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
       conftest$ac_exeext conftest.$ac_ext
-		LDFLAGS="$saved_LDFLAGS"
-   	fi
-
-  	if test ! -z "$have_bg_files" ; then
-      		BG_INCLUDES="$bg_includes"
+        { echo "$as_me:$LINENO: result: $acx_pthread_ok" >&5
+echo "${ECHO_T}$acx_pthread_ok" >&6; }
+        if test x"$acx_pthread_ok" = xno; then
+                PTHREAD_LIBS=""
+                PTHREAD_CFLAGS=""
+        fi
+        LIBS="$save_LIBS"
+        CFLAGS="$save_CFLAGS"
+fi
 
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_3D 1
-_ACEOF
+# We must check for the threads library under a number of different
+# names; the ordering is very important because some systems
+# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
+# libraries is broken (non-POSIX).
 
+# Create a list of thread flags to try.  Items starting with a "-" are
+# C compiler flags, and other items are library names, except for "none"
+# which indicates that we try without any flags at all.
 
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BG 1
-_ACEOF
+acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt"
 
+# The ordering *is* (sometimes) important.  Some notes on the
+# individual items follow:
 
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BGL 1
-_ACEOF
+# pthreads: AIX (must check this before -lpthread)
+# none: in case threads are in libc; should be tried before -Kthread and
+#       other compiler flags to prevent continual compiler warnings
+# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
+# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads)
+# -pthreads: Solaris/gcc
+# -mthreads: Mingw32/gcc, Lynx/gcc
+# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
+#      doesn't hurt to check since this sometimes defines pthreads too;
+#      also defines -D_REENTRANT)
+# pthread: Linux, etcetera
+# --thread-safe: KAI C++
 
+case "${host_cpu}-${host_os}" in
+        *solaris*)
 
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_FRONT_END 1
-_ACEOF
+        # On Solaris (at least, for some versions), libc contains stubbed
+        # (non-functional) versions of the pthreads routines, so link-based
+        # tests will erroneously succeed.  (We need to link with -pthread or
+        # -lpthread.)  (The stubs are missing pthread_cleanup_push, or rather
+        # a function called by this macro, so we could check for that, but
+        # who knows whether they'll stub that too in a future libc.)  So,
+        # we'll just look for -pthreads and -lpthread first:
 
+        acx_pthread_flags="-pthread -pthreads pthread -mt $acx_pthread_flags"
+        ;;
+esac
 
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BG_FILES 1
-_ACEOF
+if test x"$acx_pthread_ok" = xno; then
+for flag in $acx_pthread_flags; do
 
+        case $flag in
+                none)
+                { echo "$as_me:$LINENO: checking whether pthreads work without any flags" >&5
+echo $ECHO_N "checking whether pthreads work without any flags... $ECHO_C" >&6; }
+                ;;
 
+                -*)
+                { echo "$as_me:$LINENO: checking whether pthreads work with $flag" >&5
+echo $ECHO_N "checking whether pthreads work with $flag... $ECHO_C" >&6; }
+                PTHREAD_CFLAGS="$flag"
+                ;;
 
-cat >>confdefs.h <<_ACEOF
-#define BG_BRIDGE_SO "$bg_bridge_so"
-_ACEOF
+                *)
+                { echo "$as_me:$LINENO: checking for the pthreads library -l$flag" >&5
+echo $ECHO_N "checking for the pthreads library -l$flag... $ECHO_C" >&6; }
+                PTHREAD_LIBS="-l$flag"
+                ;;
+        esac
 
+        save_LIBS="$LIBS"
+        save_CFLAGS="$CFLAGS"
+        LIBS="$PTHREAD_LIBS $LIBS"
+        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
 
-cat >>confdefs.h <<_ACEOF
-#define BG_DB2_SO "$bg_db2_so"
+        # Check for various functions.  We must include pthread.h,
+        # since some functions may be macros.  (On the Sequent, we
+        # need a special flag -Kthread to make this header compile.)
+        # We check for pthread_join because it is in -lpthread on IRIX
+        # while pthread_create is in libc.  We check for pthread_attr_init
+        # due to DEC craziness with -lpthreads.  We check for
+        # pthread_cleanup_push because it is one of the few pthread
+        # functions on Solaris that doesn't have a non-functional libc stub.
+        # We try pthread_create on general principles.
+        cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
 _ACEOF
-
-		{ echo "$as_me:$LINENO: checking for BG serial value" >&5
-echo $ECHO_N "checking for BG serial value... $ECHO_C" >&6; }
-      		bg_serial="BGL"
-
-# Check whether --with-bg-serial was given.
-if test "${with_bg_serial+set}" = set; then
-  withval=$with_bg_serial; bg_serial="$withval"
-fi
-
-     		{ echo "$as_me:$LINENO: result: $bg_serial" >&5
-echo "${ECHO_T}$bg_serial" >&6; }
-
-cat >>confdefs.h <<_ACEOF
-#define BG_SERIAL "$bg_serial"
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+#include <pthread.h>
+int
+main ()
+{
+pthread_t th; pthread_join(th, 0);
+                     pthread_attr_init(0); pthread_cleanup_push(0, 0);
+                     pthread_create(0,0,0,0); pthread_cleanup_pop(0);
+  ;
+  return 0;
+}
 _ACEOF
-
- 		#define ac_bluegene_loaded so we don't load another bluegene conf
-		ac_bluegene_loaded=yes
-  	fi
-
-
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_link") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext &&
+       $as_test_x conftest$ac_exeext; then
+  acx_pthread_ok=yes
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
 
 
-	# test for bluegene emulation mode
-   	# Check whether --enable-bgp-emulation was given.
-if test "${enable_bgp_emulation+set}" = set; then
-  enableval=$enable_bgp_emulation;  case "$enableval" in
-	  yes) bgp_emulation=yes ;;
-	  no)  bgp_emulation=no ;;
-	  *)   { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-bgp-emulation" >&5
-echo "$as_me: error: bad value \"$enableval\" for --enable-bgp-emulation" >&2;}
-   { (exit 1); exit 1; }; }  ;;
-    	esac
 fi
 
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+      conftest$ac_exeext conftest.$ac_ext
 
-	# Skip if already set
-   	if test "x$ac_bluegene_loaded" = "xyes" ; then
-		bg_default_dirs=""
-	elif test "x$bgp_emulation" = "xyes"; then
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_3D 1
-_ACEOF
-
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BG 1
-_ACEOF
-
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BGP 1
-_ACEOF
-
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_FRONT_END 1
-_ACEOF
-
-    		{ echo "$as_me:$LINENO: Running in BG/P emulation mode" >&5
-echo "$as_me: Running in BG/P emulation mode" >&6;}
-		bg_default_dirs=""
- 		#define ac_bluegene_loaded so we don't load another bluegene conf
-		ac_bluegene_loaded=yes
-	else
-		bg_default_dirs="/bgsys/drivers/ppcfloor"
-	fi
-
-	libname=bgpbridge
-
-   	for bg_dir in $trydb2dir "" $bg_default_dirs; do
-      	# Skip directories that don't exist
-      		if test ! -z "$bg_dir" -a ! -d "$bg_dir" ; then
-         		continue;
-      		fi
+        LIBS="$save_LIBS"
+        CFLAGS="$save_CFLAGS"
 
-		soloc=$bg_dir/lib64/lib$libname.so
-      		# Search for required BG API libraries in the directory
-      		if test -z "$have_bg_ar" -a -f "$soloc" ; then
-         		have_bgp_ar=yes
-			bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -l$libname"
-        	fi
+        { echo "$as_me:$LINENO: result: $acx_pthread_ok" >&5
+echo "${ECHO_T}$acx_pthread_ok" >&6; }
+        if test "x$acx_pthread_ok" = xyes; then
+                break;
+        fi
 
-      		# Search for headers in the directory
-      		if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then
-         		have_bgp_hdr=yes
-         		bg_includes="-I$bg_dir/include"
-      		fi
-   	done
+        PTHREAD_LIBS=""
+        PTHREAD_CFLAGS=""
+done
+fi
 
-   	if test ! -z "$have_bgp_ar" -a ! -z "$have_bgp_hdr" ; then
-      		# ac_with_readline="no"
-		# Test to make sure the api is good
-                saved_LDFLAGS="$LDFLAGS"
-      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags"
-         	cat >conftest.$ac_ext <<_ACEOF
+# Various other checks:
+if test "x$acx_pthread_ok" = xyes; then
+        save_LIBS="$LIBS"
+        LIBS="$PTHREAD_LIBS $LIBS"
+        save_CFLAGS="$CFLAGS"
+        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+        # Detect AIX lossage: threads are created detached by default
+        # and the JOINABLE attribute has a nonstandard name (UNDETACHED).
+        { echo "$as_me:$LINENO: checking for joinable pthread attribute" >&5
+echo $ECHO_N "checking for joinable pthread attribute... $ECHO_C" >&6; }
+        cat >conftest.$ac_ext <<_ACEOF
 /* confdefs.h.  */
 _ACEOF
 cat confdefs.h >>conftest.$ac_ext
 cat >>conftest.$ac_ext <<_ACEOF
 /* end confdefs.h.  */
- int rm_set_serial(char *);
+#include <pthread.h>
 int
 main ()
 {
- rm_set_serial("");
+int attr=PTHREAD_CREATE_JOINABLE;
   ;
   return 0;
 }
@@ -24969,85 +24888,169 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
 	 test ! -s conftest.err
        } && test -s conftest$ac_exeext &&
        $as_test_x conftest$ac_exeext; then
-  have_bgp_files=yes
+  ok=PTHREAD_CREATE_JOINABLE
 else
   echo "$as_me: failed program was:" >&5
 sed 's/^/| /' conftest.$ac_ext >&5
 
-	{ { echo "$as_me:$LINENO: error: There is a problem linking to the BG/P api." >&5
-echo "$as_me: error: There is a problem linking to the BG/P api." >&2;}
-   { (exit 1); exit 1; }; }
+	ok=unknown
 fi
 
 rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
       conftest$ac_exeext conftest.$ac_ext
-		LDFLAGS="$saved_LDFLAGS"
-   	fi
-
-  	if test ! -z "$have_bgp_files" ; then
-      		BG_INCLUDES="$bg_includes"
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_3D 1
+        if test x"$ok" = xunknown; then
+                cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
 _ACEOF
-
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BG 1
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+#include <pthread.h>
+int
+main ()
+{
+int attr=PTHREAD_CREATE_UNDETACHED;
+  ;
+  return 0;
+}
 _ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_link") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext &&
+       $as_test_x conftest$ac_exeext; then
+  ok=PTHREAD_CREATE_UNDETACHED
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
 
+	ok=unknown
+fi
 
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BGP 1
-_ACEOF
-
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+      conftest$ac_exeext conftest.$ac_ext
+        fi
+        if test x"$ok" != xPTHREAD_CREATE_JOINABLE; then
 
 cat >>confdefs.h <<\_ACEOF
-#define HAVE_FRONT_END 1
+#define PTHREAD_CREATE_JOINABLE $ok
 _ACEOF
 
+        fi
+        { echo "$as_me:$LINENO: result: ${ok}" >&5
+echo "${ECHO_T}${ok}" >&6; }
+        if test x"$ok" = xunknown; then
+                { echo "$as_me:$LINENO: WARNING: we do not know how to create joinable pthreads" >&5
+echo "$as_me: WARNING: we do not know how to create joinable pthreads" >&2;}
+        fi
 
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_BG_FILES 1
-_ACEOF
+        { echo "$as_me:$LINENO: checking if more special flags are required for pthreads" >&5
+echo $ECHO_N "checking if more special flags are required for pthreads... $ECHO_C" >&6; }
+        flag=no
+        case "${host_cpu}-${host_os}" in
+                *-aix* | *-freebsd*)     flag="-D_THREAD_SAFE";;
+                *solaris* | alpha*-osf*) flag="-D_REENTRANT";;
+        esac
+        { echo "$as_me:$LINENO: result: ${flag}" >&5
+echo "${ECHO_T}${flag}" >&6; }
+        if test "x$flag" != xno; then
+                PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
+        fi
 
+        LIBS="$save_LIBS"
+        CFLAGS="$save_CFLAGS"
 
-cat >>confdefs.h <<_ACEOF
-#define BG_BRIDGE_SO "$soloc"
-_ACEOF
+        # More AIX lossage: must compile with cc_r
+        # Extract the first word of "cc_r", so it can be a program name with args.
+set dummy cc_r; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_PTHREAD_CC+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  if test -n "$PTHREAD_CC"; then
+  ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_PTHREAD_CC="cc_r"
+    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+done
+IFS=$as_save_IFS
 
+  test -z "$ac_cv_prog_PTHREAD_CC" && ac_cv_prog_PTHREAD_CC="${CC}"
+fi
+fi
+PTHREAD_CC=$ac_cv_prog_PTHREAD_CC
+if test -n "$PTHREAD_CC"; then
+  { echo "$as_me:$LINENO: result: $PTHREAD_CC" >&5
+echo "${ECHO_T}$PTHREAD_CC" >&6; }
+else
+  { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
 
-		{ echo "$as_me:$LINENO: checking for BG serial value" >&5
-echo $ECHO_N "checking for BG serial value... $ECHO_C" >&6; }
-        	bg_serial="BGP"
 
-# Check whether --with-bg-serial was given.
-if test "${with_bg_serial+set}" = set; then
-  withval=$with_bg_serial; bg_serial="$withval"
+else
+        PTHREAD_CC="$CC"
 fi
 
-     		{ echo "$as_me:$LINENO: result: $bg_serial" >&5
-echo "${ECHO_T}$bg_serial" >&6; }
 
-cat >>confdefs.h <<_ACEOF
-#define BG_SERIAL "$bg_serial"
-_ACEOF
 
- 		#define ac_bluegene_loaded so we don't load another bluegene conf
-		ac_bluegene_loaded=yes
-   	fi
 
 
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$acx_pthread_ok" = xyes; then
 
- if test "x$ac_bluegene_loaded" = "xyes"; then
-  BLUEGENE_LOADED_TRUE=
-  BLUEGENE_LOADED_FALSE='#'
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_PTHREAD 1
+_ACEOF
+
+        :
 else
-  BLUEGENE_LOADED_TRUE='#'
-  BLUEGENE_LOADED_FALSE=
+        acx_pthread_ok=no
+        { { echo "$as_me:$LINENO: error: Error: Cannot figure out how to use pthreads!" >&5
+echo "$as_me: error: Error: Cannot figure out how to use pthreads!" >&2;}
+   { (exit 1); exit 1; }; }
 fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+# Always define WITH_PTHREADS if we make it this far
 
+cat >>confdefs.h <<\_ACEOF
+#define WITH_PTHREADS 1
+_ACEOF
 
+LDFLAGS="$LDFLAGS "
+CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+LIBS="$PTHREAD_LIBS $LIBS"
 
 
   { echo "$as_me:$LINENO: checking for Sun Constellation system" >&5
@@ -25436,6 +25439,12 @@ fi
     ac_have_gtk="yes"
     _x_ac_pkcfg_bin="no"
 
+    # use the correct libs if running on 64bit
+    if test -d "/usr/lib64/pkgconfig"; then
+	    PKG_CONFIG_PATH="/usr/lib64/pkgconfig/"
+    fi
+
+
 ### Check for pkg-config program
 
 # Check whether --with-pkg-config was given.
@@ -25546,7 +25555,7 @@ echo "$as_me: WARNING: *** pkg-config not found. Cannot probe for libglade-2.0 o
 #    fi
 
 
-### Check for gtk2.7.1 package
+### Check for min gtk package
     if test "$ac_have_gtk" == "yes" ; then
         $HAVEPKGCONFIG --exists gtk+-2.0
         if ! test $? -eq 0 ; then
@@ -25554,18 +25563,14 @@ echo "$as_me: WARNING: *** pkg-config not found. Cannot probe for libglade-2.0 o
 echo "$as_me: WARNING: *** gtk+-2.0 is not available." >&2;}
             ac_have_gtk="no"
 	else
-	   gtk_config_major_version=`$HAVEPKGCONFIG --modversion gtk+-2.0 | \
-             sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\1/'`
-    	   gtk_config_minor_version=`$HAVEPKGCONFIG --modversion gtk+-2.0 | \
-             sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\2/'`
-    	   gtk_config_micro_version=`$HAVEPKGCONFIG --modversion gtk+-2.0 | \
-             sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\3/'`
-
-	   if test $gtk_config_major_version -lt 2 || test $gtk_config_minor_version -lt 7 || test $gtk_config_micro_version -lt 1; then
-	   	{ echo "$as_me:$LINENO: WARNING: *** gtk+-$gtk_config_major_version.$gtk_config_minor_version.$gtk_config_micro_version available, we need >= gtk+-2.7.1 installed for sview." >&5
-echo "$as_me: WARNING: *** gtk+-$gtk_config_major_version.$gtk_config_minor_version.$gtk_config_micro_version available, we need >= gtk+-2.7.1 installed for sview." >&2;}
-            	ac_have_gtk="no"
-	   fi
+	    min_gtk_version="2.7.1"
+	    $HAVEPKGCONFIG --atleast-version=$min_gtk_version gtk+-2.0
+	    if ! test $? -eq 0 ; then
+		    gtk_config_version=`$HAVEPKGCONFIG --modversion gtk+-2.0`
+		    { echo "$as_me:$LINENO: WARNING: *** gtk+-$gtk_config_version available, we need >= gtk+-$min_gtk_version installed for sview." >&5
+echo "$as_me: WARNING: *** gtk+-$gtk_config_version available, we need >= gtk+-$min_gtk_version installed for sview." >&2;}
+		    ac_have_gtk="no"
+	    fi
         fi
     fi
 
@@ -27518,7 +27523,7 @@ _ACEOF
 
 
 
-ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/python/Makefile contribs/python/hostlist/Makefile contribs/python/hostlist/test/Makefile src/Makefile src/api/Makefile src/common/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/srun/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile 
src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/gang/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/linear/Makefile src/plugins/select/cons_res/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile testsuite/slurm_unit/slurmctld/Makefile testsuite/slurm_unit/slurmd/Makefile testsuite/slurm_unit/slurmdbd/Makefile"
+ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/python/Makefile contribs/python/hostlist/Makefile contribs/python/hostlist/test/Makefile contribs/slurmdb-direct/Makefile src/Makefile src/api/Makefile src/common/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/srun/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/proctrack/Makefile 
src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/gang/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/linear/Makefile src/plugins/select/cons_res/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile testsuite/slurm_unit/slurmctld/Makefile testsuite/slurm_unit/slurmd/Makefile testsuite/slurm_unit/slurmdbd/Makefile"
 
 
 cat >confcache <<\_ACEOF
@@ -27624,13 +27629,6 @@ echo "$as_me: error: conditional \"MAINTAINER_MODE\" was never defined.
 Usually this means the macro was only invoked conditionally." >&2;}
    { (exit 1); exit 1; }; }
 fi
-if test -z "${HAVE_AIX_TRUE}" && test -z "${HAVE_AIX_FALSE}"; then
-  { { echo "$as_me:$LINENO: error: conditional \"HAVE_AIX\" was never defined.
-Usually this means the macro was only invoked conditionally." >&5
-echo "$as_me: error: conditional \"HAVE_AIX\" was never defined.
-Usually this means the macro was only invoked conditionally." >&2;}
-   { (exit 1); exit 1; }; }
-fi
 if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
   { { echo "$as_me:$LINENO: error: conditional \"AMDEP\" was never defined.
 Usually this means the macro was only invoked conditionally." >&5
@@ -27645,6 +27643,20 @@ echo "$as_me: error: conditional \"am__fastdepCC\" was never defined.
 Usually this means the macro was only invoked conditionally." >&2;}
    { (exit 1); exit 1; }; }
 fi
+if test -z "${BLUEGENE_LOADED_TRUE}" && test -z "${BLUEGENE_LOADED_FALSE}"; then
+  { { echo "$as_me:$LINENO: error: conditional \"BLUEGENE_LOADED\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+echo "$as_me: error: conditional \"BLUEGENE_LOADED\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+   { (exit 1); exit 1; }; }
+fi
+if test -z "${HAVE_AIX_TRUE}" && test -z "${HAVE_AIX_FALSE}"; then
+  { { echo "$as_me:$LINENO: error: conditional \"HAVE_AIX\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+echo "$as_me: error: conditional \"HAVE_AIX\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+   { (exit 1); exit 1; }; }
+fi
 if test -z "${HAVE_AIX_PROCTRACK_TRUE}" && test -z "${HAVE_AIX_PROCTRACK_FALSE}"; then
   { { echo "$as_me:$LINENO: error: conditional \"HAVE_AIX_PROCTRACK\" was never defined.
 Usually this means the macro was only invoked conditionally." >&5
@@ -27701,13 +27713,6 @@ echo "$as_me: error: conditional \"HAVE_UNSETENV\" was never defined.
 Usually this means the macro was only invoked conditionally." >&2;}
    { (exit 1); exit 1; }; }
 fi
-if test -z "${BLUEGENE_LOADED_TRUE}" && test -z "${BLUEGENE_LOADED_FALSE}"; then
-  { { echo "$as_me:$LINENO: error: conditional \"BLUEGENE_LOADED\" was never defined.
-Usually this means the macro was only invoked conditionally." >&5
-echo "$as_me: error: conditional \"BLUEGENE_LOADED\" was never defined.
-Usually this means the macro was only invoked conditionally." >&2;}
-   { (exit 1); exit 1; }; }
-fi
 if test -z "${HAVE_SOME_CURSES_TRUE}" && test -z "${HAVE_SOME_CURSES_FALSE}"; then
   { { echo "$as_me:$LINENO: error: conditional \"HAVE_SOME_CURSES\" was never defined.
 Usually this means the macro was only invoked conditionally." >&5
@@ -28253,6 +28258,7 @@ do
     "contribs/python/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/python/Makefile" ;;
     "contribs/python/hostlist/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/python/hostlist/Makefile" ;;
     "contribs/python/hostlist/test/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/python/hostlist/test/Makefile" ;;
+    "contribs/slurmdb-direct/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/slurmdb-direct/Makefile" ;;
     "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;;
     "src/api/Makefile") CONFIG_FILES="$CONFIG_FILES src/api/Makefile" ;;
     "src/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/common/Makefile" ;;
@@ -28502,18 +28508,18 @@ am__untar!$am__untar$ac_delim
 MAINTAINER_MODE_TRUE!$MAINTAINER_MODE_TRUE$ac_delim
 MAINTAINER_MODE_FALSE!$MAINTAINER_MODE_FALSE$ac_delim
 MAINT!$MAINT$ac_delim
-CMD_LDFLAGS!$CMD_LDFLAGS$ac_delim
-LIB_LDFLAGS!$LIB_LDFLAGS$ac_delim
-SO_LDFLAGS!$SO_LDFLAGS$ac_delim
-HAVE_AIX_TRUE!$HAVE_AIX_TRUE$ac_delim
-HAVE_AIX_FALSE!$HAVE_AIX_FALSE$ac_delim
-HAVE_AIX!$HAVE_AIX$ac_delim
-PROCTRACKDIR!$PROCTRACKDIR$ac_delim
 CC!$CC$ac_delim
 CFLAGS!$CFLAGS$ac_delim
 LDFLAGS!$LDFLAGS$ac_delim
 CPPFLAGS!$CPPFLAGS$ac_delim
 ac_ct_CC!$ac_ct_CC$ac_delim
+EXEEXT!$EXEEXT$ac_delim
+OBJEXT!$OBJEXT$ac_delim
+DEPDIR!$DEPDIR$ac_delim
+am__include!$am__include$ac_delim
+am__quote!$am__quote$ac_delim
+AMDEP_TRUE!$AMDEP_TRUE$ac_delim
+AMDEP_FALSE!$AMDEP_FALSE$ac_delim
 _ACEOF
 
   if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 97; then
@@ -28555,17 +28561,21 @@ _ACEOF
 ac_delim='%!_!# '
 for ac_last_try in false false false false false :; do
   cat >conf$$subs.sed <<_ACEOF
-EXEEXT!$EXEEXT$ac_delim
-OBJEXT!$OBJEXT$ac_delim
-DEPDIR!$DEPDIR$ac_delim
-am__include!$am__include$ac_delim
-am__quote!$am__quote$ac_delim
-AMDEP_TRUE!$AMDEP_TRUE$ac_delim
-AMDEP_FALSE!$AMDEP_FALSE$ac_delim
 AMDEPBACKSLASH!$AMDEPBACKSLASH$ac_delim
 CCDEPMODE!$CCDEPMODE$ac_delim
 am__fastdepCC_TRUE!$am__fastdepCC_TRUE$ac_delim
 am__fastdepCC_FALSE!$am__fastdepCC_FALSE$ac_delim
+BG_INCLUDES!$BG_INCLUDES$ac_delim
+BLUEGENE_LOADED_TRUE!$BLUEGENE_LOADED_TRUE$ac_delim
+BLUEGENE_LOADED_FALSE!$BLUEGENE_LOADED_FALSE$ac_delim
+BLUEGENE_LOADED!$BLUEGENE_LOADED$ac_delim
+CMD_LDFLAGS!$CMD_LDFLAGS$ac_delim
+LIB_LDFLAGS!$LIB_LDFLAGS$ac_delim
+SO_LDFLAGS!$SO_LDFLAGS$ac_delim
+HAVE_AIX_TRUE!$HAVE_AIX_TRUE$ac_delim
+HAVE_AIX_FALSE!$HAVE_AIX_FALSE$ac_delim
+HAVE_AIX!$HAVE_AIX$ac_delim
+PROCTRACKDIR!$PROCTRACKDIR$ac_delim
 CPP!$CPP$ac_delim
 GREP!$GREP$ac_delim
 EGREP!$EGREP$ac_delim
@@ -28606,10 +28616,6 @@ HAVE_UNSETENV_FALSE!$HAVE_UNSETENV_FALSE$ac_delim
 PTHREAD_CC!$PTHREAD_CC$ac_delim
 PTHREAD_LIBS!$PTHREAD_LIBS$ac_delim
 PTHREAD_CFLAGS!$PTHREAD_CFLAGS$ac_delim
-BG_INCLUDES!$BG_INCLUDES$ac_delim
-BLUEGENE_LOADED_TRUE!$BLUEGENE_LOADED_TRUE$ac_delim
-BLUEGENE_LOADED_FALSE!$BLUEGENE_LOADED_FALSE$ac_delim
-BLUEGENE_LOADED!$BLUEGENE_LOADED$ac_delim
 SEMAPHORE_SOURCES!$SEMAPHORE_SOURCES$ac_delim
 SEMAPHORE_LIBS!$SEMAPHORE_LIBS$ac_delim
 NCURSES!$NCURSES$ac_delim
diff --git a/configure.ac b/configure.ac
index 98129b1d5..6acdcaf66 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,4 @@
-# $Id: configure.ac 16088 2008-12-29 21:56:17Z jette $
+# $Id: configure.ac 16936 2009-03-18 22:02:01Z da $
 # This file is to be processed with autoconf to generate a configure script
 
 dnl Prologue
@@ -24,6 +24,12 @@ AM_MAINTAINER_MODE
 AC_CONFIG_HEADERS([config.h])
 AC_CONFIG_HEADERS([slurm/slurm.h])
 
+dnl This needs to be close to the front to set CFLAGS=-m64
+X_AC_BGL
+X_AC_BGP
+AM_CONDITIONAL(BLUEGENE_LOADED, test "x$ac_bluegene_loaded" = "xyes")
+AC_SUBST(BLUEGENE_LOADED)
+
 X_AC_AIX
 
 dnl
@@ -133,11 +139,6 @@ LDFLAGS="$LDFLAGS "
 CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
 LIBS="$PTHREAD_LIBS $LIBS"
 
-X_AC_BGL
-X_AC_BGP
-AM_CONDITIONAL(BLUEGENE_LOADED, test "x$ac_bluegene_loaded" = "xyes")
-AC_SUBST(BLUEGENE_LOADED)
-
 X_AC_SUN_CONST
 
 X_AC_CFLAGS
@@ -296,6 +297,7 @@ AC_CONFIG_FILES([Makefile
 		 contribs/python/Makefile
 		 contribs/python/hostlist/Makefile
 		 contribs/python/hostlist/test/Makefile
+		 contribs/slurmdb-direct/Makefile
 		 src/Makefile 
 		 src/api/Makefile 
 		 src/common/Makefile
diff --git a/contribs/Makefile.am b/contribs/Makefile.am
index 7bf4cef72..5c63fda34 100644
--- a/contribs/Makefile.am
+++ b/contribs/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = perlapi python torque 
+SUBDIRS = perlapi python torque slurmdb-direct
 
 EXTRA_DIST = \
 	env_cache_builder.c	\
diff --git a/contribs/Makefile.in b/contribs/Makefile.in
index decc1c5be..0b1295f61 100644
--- a/contribs/Makefile.in
+++ b/contribs/Makefile.in
@@ -248,7 +248,7 @@ target_os = @target_os@
 target_vendor = @target_vendor@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = perlapi python torque 
+SUBDIRS = perlapi python torque slurmdb-direct
 EXTRA_DIST = \
 	env_cache_builder.c	\
 	make.slurm.patch	\
diff --git a/contribs/make.slurm.patch b/contribs/make.slurm.patch
index f5bd6134d..df23aa729 100644
--- a/contribs/make.slurm.patch
+++ b/contribs/make.slurm.patch
@@ -30,7 +30,7 @@ Index: job.c
  child_execute_job (int stdin_fd, int stdout_fd, char **argv, char **envp)
  {
 +/* PARALLEL JOB LAUNCH VIA SLURM */
-+  if (getenv("SLURM_JOBID") {
++  if (getenv("SLURM_JOBID")) {
 +    int i;
 +    static char *argx[128];
 +    argx[0] = "srun";
diff --git a/contribs/perlapi/libslurm-perl/Slurm.xs b/contribs/perlapi/libslurm-perl/Slurm.xs
index b92833d1f..72475ed7f 100644
--- a/contribs/perlapi/libslurm-perl/Slurm.xs
+++ b/contribs/perlapi/libslurm-perl/Slurm.xs
@@ -25,6 +25,7 @@ extern int slurm_hostlist_push(hostlist_t hl, const char *hosts);
 extern int slurm_hostlist_push_host(hostlist_t hl, const char *host);
 extern int slurm_hostlist_find(hostlist_t hl, const char *hostname);
 extern size_t slurm_hostlist_ranged_string(hostlist_t hl, size_t n, char *buf);
+extern void slurm_hostlist_uniq(hostlist_t hl);
 
 struct slurm {
 	node_info_msg_t *node_info_msg;
@@ -804,6 +805,11 @@ slurm_hostlist_ranged_string(hostlist_t hl = NULL)
 	OUTPUT:
 		RETVAL
 
+void
+slurm_hostlist_uniq(hostlist_t hl = NULL)
+        CODE:
+		slurm_hostlist_uniq(hl);
+
 void
 DESTROY(hl)
 		hostlist_t hl=NULL;
diff --git a/contribs/python/hostlist/CHANGES b/contribs/python/hostlist/CHANGES
index 292217818..86f30ec84 100644
--- a/contribs/python/hostlist/CHANGES
+++ b/contribs/python/hostlist/CHANGES
@@ -1,3 +1,22 @@
+Version 1.5 (2009-02-22)
+
+    Make each "-" on the command line count as one hostlist argument.
+    If multiple hostslists are given on stdin they are combined to a
+    union hostlist before being used in the way requested by the
+    options.
+
+    Add hostgrep utility to search for lines matching a hostlist.
+
+    Make the build system (used when building tar.gz and RPMs from the
+    source code held in git) smarter.
+
+Version 1.4 (2008-12-28)
+
+    Support Python 3.
+
+    Import reduce from functools if possible.
+    Use Python 2/3 installation trick from .../Demo/distutils/test2to3
+
 Version 1.3 (2008-09-30)
 
     Add -s/--separator, -p/--prepend, -a/--append and --version
diff --git a/contribs/python/hostlist/PKG-INFO b/contribs/python/hostlist/PKG-INFO
index cf6353023..f66980ccb 100644
--- a/contribs/python/hostlist/PKG-INFO
+++ b/contribs/python/hostlist/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: python-hostlist
-Version: 1.3
+Version: 1.5
 Summary: Python module for hostlist handling
 Home-page: http://www.nsc.liu.se/~kent/python-hostlist/
 Author: Kent Engström
@@ -14,3 +14,5 @@ Classifier: Intended Audience :: System Administrators
 Classifier: License :: OSI Approved :: GNU General Public License (GPL)
 Classifier: Topic :: System :: Clustering
 Classifier: Topic :: System :: Systems Administration
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
diff --git a/contribs/python/hostlist/README b/contribs/python/hostlist/README
index 933cd164b..ebb9d5278 100644
--- a/contribs/python/hostlist/README
+++ b/contribs/python/hostlist/README
@@ -1,3 +1,6 @@
+INTRODUCTION
+============
+
 The Python module hostlist.py knows how to expand and collect hostlist
 expressions. Example:
 
@@ -32,22 +35,27 @@ perform set operations on them. Example:
   n9
   n10
 
-Install directly by running
 
-  python setup.py build   (as yourself)
-  python setup.py install (as root) 
+BUILDING
+========
 
-or just copy the hostlist.py, hostlist and hostlist.1 files to appropriate places.
+Build RPM packages from the tar.gz archive by running:
 
-Build RPM packages by running:
+  rpmbuild -ta python-hostlist-1.5.tar.gz
 
-  rpmbuild -ta python-hostlist-VERSION.tar.gz
-
-If you do not have the tar archive, build RPM packages by running:
+If you do not have the tar archive, create it first:
 
   python setup.py sdist
-  cp dist/python-hostlist-VERSION.tar.gz ~/rpmbuild/SOURCES
-  rpmbuild -ba python-hostlist.spec  
+  rpmbuild -ta dist/python-hostlist-1.5.tar.gz
+
+You may also install directly by running:
+
+  python setup.py build   (as yourself)
+  python setup.py install (as root) 
+
+
+RELEASES AND FEEDBACK
+=====================
 
 You will find new releases at:
 
diff --git a/contribs/python/hostlist/hostlist.py b/contribs/python/hostlist/hostlist.py
index f2c2adac1..b3b520336 100755
--- a/contribs/python/hostlist/hostlist.py
+++ b/contribs/python/hostlist/hostlist.py
@@ -34,7 +34,7 @@ corner cases the behaviour of this module have not been compared for
 compatibility with pdsh/dshbak/SLURM et al.
 """
 
-__version__ = "1.3"
+__version__ = "1.5"
 
 import re
 import itertools
@@ -50,7 +50,7 @@ MAX_SIZE = 100000
 def expand_hostlist(hostlist, allow_duplicates=False, sort=False):
     """Expand a hostlist expression string to a Python list.
 
-    Exemple: expand_hostlist("n[9-11],d[01-02]") ==> 
+    Example: expand_hostlist("n[9-11],d[01-02]") ==> 
              ['n9', 'n10', 'n11', 'd01', 'd02']
 
     Unless allow_duplicates is true, duplicates will be purged
diff --git a/contribs/python/hostlist/python-hostlist.spec b/contribs/python/hostlist/python-hostlist.spec
index 7f0efc5f3..9c2ce0df1 100644
--- a/contribs/python/hostlist/python-hostlist.spec
+++ b/contribs/python/hostlist/python-hostlist.spec
@@ -1,7 +1,7 @@
 %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
 
 Name:           python-hostlist
-Version:        1.3
+Version:        1.5
 Release:        1
 Summary:        Python module for hostlist handling
 Vendor:         NSC
@@ -45,5 +45,7 @@ rm -rf $RPM_BUILD_ROOT
 %doc CHANGES
 %{python_sitelib}/*
 /usr/bin/hostlist
+/usr/bin/hostgrep
 /usr/share/man/man1/hostlist.1.gz
+/usr/share/man/man1/hostgrep.1.gz
 %changelog
diff --git a/contribs/python/hostlist/setup.py b/contribs/python/hostlist/setup.py
index 0afd28097..57199e707 100644
--- a/contribs/python/hostlist/setup.py
+++ b/contribs/python/hostlist/setup.py
@@ -2,8 +2,27 @@
 
 from distutils.core import setup
 
+# Python 2/3 installation trick from .../Demo/distutils/test2to3
+try:
+    from distutils.command.build_py import build_py_2to3 as build_py
+except ImportError:
+    from distutils.command.build_py import build_py
+
+try:
+    from distutils.command.build_scripts import build_scripts_2to3 as build_scripts
+except ImportError:
+    from distutils.command.build_scripts import build_scripts
+
+# Version
+VERSION = "1.5"
+if "#" in VERSION:
+    import sys
+    sys.stderr.write("Bad version %s\n" % VERSION)
+    sys.exit(1)
+
+
 setup(name         = "python-hostlist",
-      version      = "1.3", # Change in hostlist{,.py,.1}, python-hostlist.spec too!
+      version      = VERSION,
       description  = "Python module for hostlist handling",
       long_description = "The hostlist.py module knows how to expand and collect hostlist expressions.",
       author       = "Kent Engström",
@@ -16,8 +35,14 @@ setup(name         = "python-hostlist",
                       'License :: OSI Approved :: GNU General Public License (GPL)',
                       'Topic :: System :: Clustering',
                       'Topic :: System :: Systems Administration',
+                      'Programming Language :: Python :: 2',
+                      'Programming Language :: Python :: 3',
                       ],
       py_modules   = ["hostlist"],
-      scripts      = ["hostlist"],
-      data_files   = [("share/man/man1", ["hostlist.1"])],
+      scripts      = ["hostlist", "hostgrep"],
+      data_files   = [("share/man/man1", ["hostlist.1",
+                                          "hostgrep.1"])],
+      cmdclass     = {'build_py':build_py,
+                      'build_scripts':build_scripts,
+                      }
       )
diff --git a/contribs/slurmdb-direct/Makefile.am b/contribs/slurmdb-direct/Makefile.am
new file mode 100644
index 000000000..24646f9ce
--- /dev/null
+++ b/contribs/slurmdb-direct/Makefile.am
@@ -0,0 +1,39 @@
+#
+# Makefile for slurmdb-direct scripts
+
+AUTOMAKE_OPTIONS = foreign
+
+sbin_SCRIPTS = moab_2_slurmdb
+extra = config.slurmdb.pl
+
+moab_2_slurmdb:
+
+_perldir=$(exec_prefix)`perl -e 'use Config; $$T=$$Config{installsitearch}; $$P=$$Config{installprefix}; $$P1="$$P/local"; $$T =~ s/$$P1//; $$T =~ s/$$P//; print $$T;'`
+
+install-sbinSCRIPTS: $(sbin_SCRIPTS)
+	@$(NORMAL_INSTALL)
+	test -z "$(DESTDIR)$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)"
+	@list='$(sbin_SCRIPTS)'; for p in $$list; do \
+	   echo "sed 's%use lib .*%use lib qw(${_perldir});%' $(top_srcdir)/contribs/slurmdb-direct/$$p.pl > $(DESTDIR)$(sbindir)/$$p"; \
+	   sed "s%use lib .*%use lib qw(${_perldir});%" $(top_srcdir)/contribs/slurmdb-direct/$$p.pl >$(DESTDIR)$(sbindir)/$$p; \
+	   chmod 755 $(DESTDIR)$(sbindir)/$$p;\
+	done
+
+	test -z "$(DESTDIR)${_perldir}" || $(MKDIR_P) "$(DESTDIR)${_perldir}"
+	@list='$(extra)'; for p in $$list; do \
+	   echo " cp $(top_srcdir)/contribs/slurmdb-direct/$$p $(DESTDIR)${_perldir}/$$p"; \
+	   cp $(top_srcdir)/contribs/slurmdb-direct/$$p $(DESTDIR)${_perldir}/$$p; \
+	done
+
+uninstall-sbinSCRIPTS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(sbin_SCRIPTS)'; for p in $$list; do \
+	  echo " rm -f '$(DESTDIR)$(sbindir)/$$p'"; \
+	  rm -f "$(DESTDIR)$(sbindir)/$$p"; \
+	done
+	@list='$(extra)'; for p in $$list; do \
+	  echo " rm -f '$(DESTDIR)$(_perldir)/$$p'"; \
+	  rm -f "$(DESTDIR)$(_perldir)/$$p"; \
+	done
+
+clean:
diff --git a/contribs/slurmdb-direct/Makefile.in b/contribs/slurmdb-direct/Makefile.in
new file mode 100644
index 000000000..7ecb82e2d
--- /dev/null
+++ b/contribs/slurmdb-direct/Makefile.in
@@ -0,0 +1,450 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Makefile for slurmdb-direct scripts
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/slurmdb-direct
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__installdirs = "$(DESTDIR)$(sbindir)"
+sbinSCRIPT_INSTALL = $(INSTALL_SCRIPT)
+SCRIPTS = $(sbin_SCRIPTS)
+SOURCES =
+DIST_SOURCES =
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+sbin_SCRIPTS = moab_2_slurmdb
+extra = config.slurmdb.pl
+_perldir = $(exec_prefix)`perl -e 'use Config; $$T=$$Config{installsitearch}; $$P=$$Config{installprefix}; $$P1="$$P/local"; $$T =~ s/$$P1//; $$T =~ s/$$P//; print $$T;'`
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  contribs/slurmdb-direct/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  contribs/slurmdb-direct/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS)
+installdirs:
+	for dir in "$(DESTDIR)$(sbindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-sbinSCRIPTS
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-sbinSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-sbinSCRIPTS \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	uninstall uninstall-am uninstall-sbinSCRIPTS
+
+
+moab_2_slurmdb:
+
+install-sbinSCRIPTS: $(sbin_SCRIPTS)
+	@$(NORMAL_INSTALL)
+	test -z "$(DESTDIR)$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)"
+	@list='$(sbin_SCRIPTS)'; for p in $$list; do \
+	   echo "sed 's%use lib .*%use lib qw(${_perldir});%' $(top_srcdir)/contribs/slurmdb-direct/$$p.pl > $(DESTDIR)$(sbindir)/$$p"; \
+	   sed "s%use lib .*%use lib qw(${_perldir});%" $(top_srcdir)/contribs/slurmdb-direct/$$p.pl >$(DESTDIR)$(sbindir)/$$p; \
+	   chmod 755 $(DESTDIR)$(sbindir)/$$p;\
+	done
+
+	test -z "$(DESTDIR)${_perldir}" || $(MKDIR_P) "$(DESTDIR)${_perldir}"
+	@list='$(extra)'; for p in $$list; do \
+	   echo " cp $(top_srcdir)/contribs/slurmdb-direct/$$p $(DESTDIR)${_perldir}/$$p"; \
+	   cp $(top_srcdir)/contribs/slurmdb-direct/$$p $(DESTDIR)${_perldir}/$$p; \
+	done
+
+uninstall-sbinSCRIPTS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(sbin_SCRIPTS)'; for p in $$list; do \
+	  echo " rm -f '$(DESTDIR)$(sbindir)/$$p'"; \
+	  rm -f "$(DESTDIR)$(sbindir)/$$p"; \
+	done
+	@list='$(extra)'; for p in $$list; do \
+	  echo " rm -f '$(DESTDIR)$(_perldir)/$$p'"; \
+	  rm -f "$(DESTDIR)$(_perldir)/$$p"; \
+	done
+
+clean:
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/contribs/slurmdb-direct/config.slurmdb.pl b/contribs/slurmdb-direct/config.slurmdb.pl
new file mode 100644
index 000000000..f1e81d5db
--- /dev/null
+++ b/contribs/slurmdb-direct/config.slurmdb.pl
@@ -0,0 +1,15 @@
+# Set database information
+$db_name      = "slurm_acct_db";
+$db_job_table = "job_table";
+
+# These are the options you should change for your own site.
+#$db_host      = "fargo";
+#$db_port      = "3306";
+#$db_user      = "some_user";
+#$db_passwd    = "some_password";
+
+# Database connection line for the DBI
+$db_conn_line = "DBI:mysql:database=${db_name};host=${db_host}";
+
+# "1;" Required for file inclusion
+1;
diff --git a/contribs/slurmdb-direct/moab_2_slurmdb.pl b/contribs/slurmdb-direct/moab_2_slurmdb.pl
new file mode 100755
index 000000000..650663efd
--- /dev/null
+++ b/contribs/slurmdb-direct/moab_2_slurmdb.pl
@@ -0,0 +1,275 @@
+#! /usr/bin/perl -w
+###############################################################################
+#
+# slurmdbd_direct - write directly into the slurmdbd.
+#
+# based off the index in
+# http://www.clusterresources.com/products/mwm/docs/16.3.3workloadtrace.shtml
+#
+#
+###############################################################################
+#  Copyright (C) 2009 Lawrence Livermore National Security.
+#  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+#  Written by Danny Auble <da@llnl.gov>
+#  CODE-OCEC-09-009. All rights reserved.
+#  
+#  This file is part of SLURM, a resource management program.
+#  For details, see <http://www.llnl.gov/linux/slurm/>.
+#  
+#  SLURM is free software; you can redistribute it and/or modify it under
+#  the terms of the GNU General Public License as published by the Free
+#  Software Foundation; either version 2 of the License, or (at your option)
+#  any later version.
+#
+#  In addition, as a special exception, the copyright holders give permission 
+#  to link the code of portions of this program with the OpenSSL library under
+#  certain conditions as described in each individual source file, and 
+#  distribute linked combinations including the two. You must obey the GNU 
+#  General Public License in all respects for all of the code used other than 
+#  OpenSSL. If you modify file(s) with this exception, you may extend this 
+#  exception to your version of the file(s), but you are not obligated to do 
+#  so. If you do not wish to do so, delete this exception statement from your
+#  version.  If you delete this exception statement from all source files in 
+#  the program, then also delete it here.
+#  
+#  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+#  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+#  details.
+#  
+#  You should have received a copy of the GNU General Public License along
+#  with SLURM; if not, write to the Free Software Foundation, Inc.,
+#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+#  
+###############################################################################
+
+use strict;
+use FindBin;
+use Getopt::Long 2.24 qw(:config no_ignore_case require_order);
+#use lib "${FindBin::Bin}/../lib/perl";
+use lib qw(/home/da/slurm/1.3/snowflake/lib/perl/5.8.8);
+use autouse 'Pod::Usage' => qw(pod2usage);
+use Slurm ':all';
+use Switch;
+use DBI;
+BEGIN { require "config.slurmdb.pl"; }
+our ($logLevel, $db_conn_line, $db_job_table, $db_user, $db_passwd);
+
+my $set = 0;
+my $submit_set = 0;
+my $migrate_set = 0;
+my $start_set = 0;
+my $end_set = 0;
+
+my $submit_sql = "INSERT INTO $db_job_table " .
+	"(jobid, associd, wckeyid, track_steps, priority, uid, gid, cluster, " .
+	"account, partition, wckey, name, state, req_cpus, submit) VALUES ";
+
+my $migrate_sql = "INSERT INTO $db_job_table " .
+	"(jobid, associd, wckeyid, track_steps, priority, uid, gid, cluster, " .
+	"account, partition, wckey, name, state, req_cpus, " .
+	"submit, eligible) VALUES ";
+
+my $start_sql = "INSERT INTO $db_job_table " .
+	"(jobid, associd, wckeyid, track_steps, priority, uid, gid, cluster, " .
+	"account, partition, wckey, name, state, req_cpus, " .
+	"submit, eligible, start, nodelist, alloc_cpus) VALUES ";
+
+my $end_sql = "INSERT INTO $db_job_table " .
+	"(jobid, associd, wckeyid, track_steps, priority, uid, gid, cluster, " .
+	"account, partition, wckey, name, state, req_cpus, " .
+	"submit, eligible, start, nodelist, alloc_cpus, " .
+	"end, comp_code) VALUES ";
+
+foreach my $line (<STDIN>) {
+	chomp $line;
+	# the below list is based off the index in
+#  http://www.clusterresources.com/products/mwm/docs/16.3.3workloadtrace.shtml
+	my ($hr_time,
+	    $timestamp,
+	    $type,
+	    $id,
+	    $event,
+	    $req_nodes,
+	    $req_tasks,
+	    $user,
+	    $group,
+	    $wall_limit,
+	    $state,
+	    $partition,
+	    $eligible_time,
+	    $dispatch_time,
+	    $start_time,
+	    $end_time,
+	    $network,
+	    $arch,
+	    $op,
+	    $node_mem_comp,
+	    $node_mem,
+	    $node_disk_comp,
+	    $node_disk,
+	    $node_features,
+	    $submit_time,
+	    $alloc_tasks,
+	    $tasks_per_node,
+	    $qos,
+	    $flags,
+	    $account,
+	    $executable,
+	    $rm_ext,
+	    $bypass_cnt,
+	    $cpu_secs,
+	    $cluster,
+	    $procs_per_task,
+	    $mem_per_task,
+	    $disk_per_task,
+	    $swap_per_task,
+	    $other_time,
+	    $timeout,
+	    $alloc_hostlist,
+	    $rm_name,
+	    $req_hostlist,
+	    $resv,
+	    $app_sim_data,
+	    $desc,
+	    $message,
+	    $cost,
+	    $history,
+	    $util,
+	    $estimate,
+	    $comp_code,
+	    $ext_mem,
+	    $ext_cpu,
+	    @extra) = split /\s+/, $line;
+	next if !$type;
+	next if $type ne "job";
+
+	my $uid = getpwnam($user);
+	my $gid = getgrnam($group);
+	$uid = -2 if !$uid;
+	$gid = -2 if !$gid;
+	
+	# figure out the wckey 
+	my $wckey = "";
+	if ($rm_ext =~ /wckey:(\w*)/) {
+		$wckey = $1;
+	}
+	
+	if($partition =~ /\[(\w*)/) {
+		$partition = $1;
+	}
+
+	#figure out the cluster
+	if($cluster eq "ALL") {
+		if ($node_features =~ /\[(\w*)\]/) {
+			$cluster = $1;
+		} elsif ($rm_ext =~ /partition:(\w*)/) {
+			$cluster = $1;
+		} elsif ($rm_ext =~ /feature:(\w*)/) {
+			$cluster = $1;
+		} else {
+			$cluster = "";
+		}
+	}
+	
+	if($message =~ /job\\20exceeded\\20wallclock\\20limit/) {
+		$event = "JOBTIMEOUT";
+	}
+
+	my $alloc_hl = Slurm::Hostlist::create($alloc_hostlist);
+	if($alloc_hl) {
+		Slurm::Hostlist::uniq($alloc_hl);
+		$alloc_hl = Slurm::Hostlist::ranged_string($alloc_hl);
+	}
+	
+	if($event eq "JOBSUBMIT") {
+		$submit_sql .= ", " if $submit_set;
+		$submit_sql .= "($id, 0, 0, 0, 0, $uid, $gid, \"$cluster\", " .
+			"\"$account\", \"$partition\", \"$wckey\", " .
+			"\"$executable\", 0, $req_tasks, $submit_time)";
+		$submit_set = 1;		
+		$set = 1;		
+	} elsif ($event eq "JOBMIGRATE") {
+		$migrate_sql .= ", " if $migrate_set;
+	# here for some reason the eligible time field does not 
+	# hold the true eligible time, so we use the end time 
+	# which appears to be the best guess.
+		$migrate_sql .= "($id, 0, 0, 0, 0, $uid, $gid, \"$cluster\", " .
+			"\"$account\", \"$partition\", \"$wckey\", " .
+			"\"$executable\", 0, $req_tasks, $submit_time, " .
+			"$end_time)";
+		$migrate_set = 1;		
+		$set = 1;		
+	} elsif ($event eq "JOBSTART") {
+		$start_sql .= ", " if $start_set;
+
+		# req_tasks is used for alloc_tasks on purpose.
+		# alloc_tasks isn't always correct.
+		$start_sql .= "($id, 0, 0, 0, 0, $uid, $gid, \"$cluster\", " .
+			"\"$account\", \"$partition\", \"$wckey\", " .
+			"\"$executable\", 1, $req_tasks, $submit_time, " .
+			"$eligible_time, $start_time, \"$alloc_hl\", " .
+			"$req_tasks)";
+		$start_set = 1;		
+		$set = 1;		
+	} elsif (($event eq "JOBEND") || ($event eq "JOBCANCEL")
+		|| ($event eq "JOBFAILURE") || ($event eq "JOBTIMEOUT"))  {
+		if($event eq "JOBEND") {
+			$state = 3;
+		} elsif($event eq "JOBCANCEL") {
+			$state = 4;
+		} elsif($event eq "JOBFAILURE") {
+			$state = 5;
+		} else {
+			$state = 6;
+		}
+
+		$end_sql .= ", " if $end_set;
+		$end_sql .= "($id, 0, 0, 0, 0, $uid, $gid, \"$cluster\", " .
+			"\"$account\", \"$partition\", \"$wckey\", " .
+			"\"$executable\", $state, $req_tasks, $submit_time, " .
+			"$eligible_time, $start_time, \"$alloc_hl\", " .
+			"$req_tasks, $end_time, $comp_code)";
+		$end_set = 1;		
+		$set = 1;		
+	} else {
+		print "ERROR: unknown event of $event\n";
+		next;
+	}
+}
+
+exit 0 if !$set;
+
+$db_user = (getpwuid($<))[0] if !$db_user;
+my $dbhandle = DBI->connect($db_conn_line, $db_user, $db_passwd,
+			    {AutoCommit => 1, RaiseError => 1});
+if($submit_set) {
+	$submit_sql .= " on duplicate key update jobid=VALUES(jobid)";
+	#print "submit\n$submit_sql\n\n";
+	$dbhandle->do($submit_sql);
+}
+
+if($migrate_set) {
+	$migrate_sql .= " on duplicate key update eligible=VALUES(eligible)";
+	#print "migrate\n$migrate_sql\n\n";
+	$dbhandle->do($migrate_sql);
+}
+
+if($start_set) {
+	$start_sql .= " on duplicate key update nodelist=VALUES(nodelist), " .
+		"account=VALUES(account), partition=VALUES(partition), " .
+		"wckey=values(wckey), start=VALUES(start), " .
+		"name=VALUES(name), state=values(state), " .
+		"alloc_cpus=values(alloc_cpus)";
+	#print "start\n$start_sql\n\n";
+	$dbhandle->do($start_sql);
+}
+
+if($end_set) {
+	$end_sql .= " on duplicate key update end=VALUES(end), " .
+		"state=VALUES(state), comp_code=VALUES(comp_code)";
+	#print "end\n$end_sql\n\n";
+	$dbhandle->do($end_sql);
+}
+
+exit 0;
diff --git a/contribs/torque/Makefile.am b/contribs/torque/Makefile.am
index 320c25288..215d43867 100644
--- a/contribs/torque/Makefile.am
+++ b/contribs/torque/Makefile.am
@@ -17,7 +17,7 @@ _perldir=$(exec_prefix)`perl -e 'use Config; $$T=$$Config{installsitearch}; $$P=
 
 install-binSCRIPTS: $(bin_SCRIPTS)
 	@$(NORMAL_INSTALL)
-	test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+	test -z "$(DESTDIR)$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
 	@list='$(bin_SCRIPTS)'; for p in $$list; do \
 	   echo "sed 's%use lib .*%use lib qw(${_perldir});%' $(top_srcdir)/contribs/torque/$$p.pl > $(DESTDIR)$(bindir)/$$p"; \
 	   sed "s%use lib .*%use lib qw(${_perldir});%" $(top_srcdir)/contribs/torque/$$p.pl >$(DESTDIR)$(bindir)/$$p; \
diff --git a/contribs/torque/Makefile.in b/contribs/torque/Makefile.in
index 125043062..f5bcc752a 100644
--- a/contribs/torque/Makefile.in
+++ b/contribs/torque/Makefile.in
@@ -425,7 +425,7 @@ mpiexec:
 
 install-binSCRIPTS: $(bin_SCRIPTS)
 	@$(NORMAL_INSTALL)
-	test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+	test -z "$(DESTDIR)$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
 	@list='$(bin_SCRIPTS)'; for p in $$list; do \
 	   echo "sed 's%use lib .*%use lib qw(${_perldir});%' $(top_srcdir)/contribs/torque/$$p.pl > $(DESTDIR)$(bindir)/$$p"; \
 	   sed "s%use lib .*%use lib qw(${_perldir});%" $(top_srcdir)/contribs/torque/$$p.pl >$(DESTDIR)$(bindir)/$$p; \
diff --git a/doc/html/Makefile.am b/doc/html/Makefile.am
index 6b9fae583..c8c30c466 100644
--- a/doc/html/Makefile.am
+++ b/doc/html/Makefile.am
@@ -19,7 +19,7 @@ generated_html = \
 	help.html \
 	ibm.html \
 	jobacct_gatherplugins.html \
-	jobacct_storageplugins.html \
+	accounting_storageplugins.html \
 	jobcompplugins.html \
 	mail.html \
 	maui.html \
diff --git a/doc/html/Makefile.in b/doc/html/Makefile.in
index e3a50b423..fc5184132 100644
--- a/doc/html/Makefile.in
+++ b/doc/html/Makefile.in
@@ -265,7 +265,7 @@ generated_html = \
 	help.html \
 	ibm.html \
 	jobacct_gatherplugins.html \
-	jobacct_storageplugins.html \
+	accounting_storageplugins.html \
 	jobcompplugins.html \
 	mail.html \
 	maui.html \
diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml
index 4f67cb524..c860b34c3 100644
--- a/doc/html/accounting.shtml
+++ b/doc/html/accounting.shtml
@@ -8,13 +8,14 @@ releases.</p>
 
 <p>SLURM can be configured to collect accounting information for every 
 job and job step executed. 
-Accounting records can be written to a simple file or a database.
+Accounting records can be written to a simple text file or a database.
 Information is available about both currently executing jobs and 
-jobs which have already terminated and can be viewed using the 
-<b>sacct</b> command.
-<b>sacct</b> can also report resource usage for individual tasks, 
-which can be useful to detect load imbalance between the tasks. 
-The <b>sstat</b> tool can be used to status a currently running job.
+jobs which have already terminated.
+The <b>sacct</b> command can report resource usage for running or terminated
+jobs including individual tasks, which can be useful to detect load imbalance 
+between the tasks. 
+The <b>sstat</b> command can be used to status only currently running jobs.
+It also can give you valuable information about imbalance between tasks.
 The <b>sreport</b> can be used to generate reports based upon all jobs
 executed in a particular time interval.</p>
 
@@ -22,6 +23,15 @@ executed in a particular time interval.</p>
 The SLURM configuration parameters (in <i>slurm.conf</i>) associated with 
 these plugins include:</p>
 <ul>
+<li><b>AccountingStorageType</b> controls how detailed job and job 
+step information is recorded. You can store this information in a 
+text file, <a href="http://www.mysql.com/">MySQL</a> or 
+<a href="http://www.postgresql.org/">PostgreSQL</a> 
+database, optionally using SlurmDBD for added security.</li>
+<li><b>JobAcctGatherType</b> is operating system dependent and 
+controls what mechanism is used to collect accounting information.
+Supported values are <i>jobacct_gather/aix</i>, <i>jobacct_gather/linux</i>
+and <i>jobacct_gather/none</i> (no information collected).</li>
 <li><b>JobCompType</b> controls how job completion information is 
 recorded. This can be used to record basic job information such
 as job name, user name, allocated nodes, start time, completion 
@@ -31,25 +41,18 @@ with minimal overhead. You can store this information in a
 text file, <a href="http://www.mysql.com/">MySQL</a> or 
 <a href="http://www.postgresql.org/">PostgreSQL</a> 
 database</li>
-<li><b>JobAcctGatherType</b> is operating system dependent and 
-controls what mechanisms are used to collect accounting information.
-Supported values are <i>jobacct_gather/aix</i>, <i>jobacct_gather/linux</i>
-and <i>jobacct_gather/none</i> (no information collected).</li>
-<li><b>AccountingStorageType</b> controls how detailed job and job 
-step information is recorded. You can store this information in a 
-text file, <a href="http://www.mysql.com/">MySQL</a> or 
-<a href="http://www.postgresql.org/">PostgreSQL</a> 
-database optionally using either 
-<a href="http://www.clusterresources.com/pages/products/gold-allocation-manager.php">Gold</a>
-or SlurmDBD for added security.</li>
 </ul>
 
-<p>The use of sacct or sstat to view information about completed jobs 
-is dependent upon both JobAcctGatherType and AccountingStorageType
+<p>The use of sacct to view information about jobs 
+is dependent upon AccountingStorageType
 being configured to collect and store that information.
 The use of sreport is dependent upon some database being 
 used to store that information.</p>
 
+<p>The use of sacct or sstat to view information about resource usage
+  within jobs is dependent upon both JobAcctGatherType and AccountingStorageType
+  being configured to collect and store that information.</p>
+
 <p>Storing the accounting information into text files is 
 very simple. Just configure the appropriate plugin (e.g. 
 <i>AccountingStorageType=accounting_storage/filetxt</i> and/or 
@@ -71,14 +74,11 @@ sacctmgr).
 Making possibly sensitive information available to all users makes 
 database security more difficult to provide, sending the data through
 an intermediate daemon can provide better security and performance
-(through caching data).
-Gold and SlurmDBD are two such services. 
-Our initial implementation relied upon Gold, but we found its
-performance to be inadequate for our needs and developed SlurmDBD.
+(through caching data) and SlurmDBD provides such services. 
 SlurmDBD (SLURM Database Daemon) is written in C, multi-threaded, 
-secure, and considerably faster than Gold.
+secure and fast.
 The configuration required to use SlurmDBD will be described below.
-Direct database or Gold use would be similar.</p>
+Storing information directly into database would be similar.</p>
 
 <p>Note that SlurmDBD relies upon existing SLURM plugins
 for authentication and database use, but the other SLURM 
@@ -128,9 +128,9 @@ The pathname of local domain socket will be needed in the SLURM
 and SlurmDBD configuration files (slurm.conf and slurmdbd.conf 
 respectively, more details are provided below).</p>
 
-Whether you use any authentication module or not you will need to have
+<p>Whether you use any authentication module or not you will need to have
 a way for the SlurmDBD to get uid's for users and/or admin.  If using
-Munge it is ideal for your users to have the same id on all your
+Munge, it is ideal for your users to have the same id on all your
 clusters.  If this is the case you should have a combination of every clusters
 /etc/passwd file on the database server to allow the DBD to resolve
 names for authentication.  If using Munge and a users name is not in
@@ -143,9 +143,9 @@ LDAP server could also server as a way to gather this information.
 <h2>Slurm JobComp Configuration</h2>
 
 <p>Presently job completion is not supported with the SlurmDBD, but can be
-written directly to a database, script or flat file.If you are
+written directly to a database, script or flat file. If you are
 running with the accounting storage, you may not need to run this
-since it contains much of the same information.If you would like
+since it contains much of the same information. If you would like
 to configure this, some of the more important parameters include:</p>
 
 <ul>
@@ -155,7 +155,8 @@ the database server executes.</li>
 
 <li><b>JobCompPass</b>:
 Only needed if using a database. Password for the user connecting to
-the database.</li>
+the database. Since the password cannot be securely maintained,
+storing the information directly in a database is not recommended.</li>
 
 <li><b>JobCompPort</b>:
 Only needed if using a database. The network port that the database
@@ -177,23 +178,21 @@ job completions and such this configuration will not allow
 "associations" between a user and account. A database allows such
 a configuration. 
 
-<p>
-<b>MySQL is the preferred database, PostgreSQL is
+<p><b>MySQL is the preferred database, PostgreSQL is
 supported for job and step accounting only.</b> The infrastructure for
 PostgresSQL for use with associations is not yet supported, meaning
 sacctmgr will not work correcting.  If interested in adding this
-capabilty for PostgresSQL please email slurm-dev@lists.llnl.gov.
+capability for PostgresSQL, please contact us at slurm-dev@lists.llnl.gov.
 
-<p>
-To enable this database support
+<p>To enable this database support
 one only needs to have the development package for the database they
 wish to use on the system. The slurm configure script uses
 mysql_config and pg-config to find out the information it needs
 about installed libraries and headers. You can specify where your
 mysql_config script is with the
 </i>--with-mysql_conf=/path/to/mysql_config</i> option when configuring your
-slurm build. A similar option is available for PostgreSQL also. On
-a successful configure, output is something like this: </p>
+slurm build. A similar option is also available for PostgreSQL. 
+On a successful configure, output is something like this: </p>
 <pre>
 checking for mysql_config... /usr/bin/mysql_config
 MySQL test program built properly.
@@ -203,7 +202,7 @@ MySQL test program built properly.
 
 <p>For simplicity sake we are going to reference everything as if you
 are running with the SlurmDBD. You can communicate with a storage plugin
-directly, but that offers minimal authentication. 
+directly, but that offers minimal security. </p>
 
 <p>Several SLURM configuration parameters must be set to support
 archiving information in SlurmDBD. SlurmDBD has a separate configuration
@@ -212,21 +211,36 @@ Note that you can write accounting information to SlurmDBD
 while job completion records are written to a text file or 
 not maintained at all. 
 If you don't set the configuration parameters that begin 
-with "JobComp" then job completion records will not be recorded.</p>
+with "AccountingStorage" then accounting information will not be
+referenced or recorded.</p>
 
 <ul>
 <li><b>AccountingStorageEnforce</b>:
-If you want to prevent users from running jobs if their <i>association</i>
-is not in the database, then set this to "1". 
+This option contains a comma separated list of options you may want to
+ enforce.  The valid options are 
+<ul>
+<li>associations - This will prevent users from running jobs if
+their <i>association</i> is not in the database. This option will
+prevent users from accessing invalid accounts.  
+</li>
+<li>limits - This will enforce limits set to associations.  By setting
+  this option, the 'associations' option is also set.
+</li>
+<li>wckeys - This will prevent users from running jobs under a wckey
+  that they don't have access to.  By using this option, the
+  'associations' option is also set.  The 'TrackWCKey' option is also
+  set to true.
+</li>
+</ul>
 (NOTE: The association is a combination of cluster, account, 
 user names and optional partition name.)
+<br>
 Without AccountingStorageEnforce being set (the default behavior) 
-jobs will be executed based upon policies configured in SLURM on each cluster. 
-This option will prevent users from accessing invalid accounts.  
-Setting this to "2" will also cause association limits to be
-enforced.  When set to "1" association limits will not be
-enforced.  It is a good idea to run in this mode when running a
-scheduler on top of slurm, like Moab, that does not update in real
+jobs will be executed based upon policies configured in SLURM on each
+cluster.
+<br> 
+It is advisable to run without the option 'limits' set when running a
+scheduler on top of SLURM, like Moab, that does not update in real
 time their limits per association.</li>
 
 <li><b>AccountingStorageHost</b>: The name or address of the host where 
@@ -245,6 +259,13 @@ Set to "accounting_storage/slurmdbd".</li>
 <li><b>ClusterName</b>:
 Set to a unique name for each Slurm-managed cluster so that 
 accounting records from each can be identified.</li>
+<li><b>TrackWCKey</b>:
+Boolean.  If you want to track wckeys (Workload Characterization Key)
+  of users.  A Wckey is an orthogonal way to do accounting against
+  maybe a group of unrelated accounts. WCKeys can be defined using
+  sacctmgr add wckey 'name'.  When a job is run use srun --wckey and 
+  time will be summed up for this wckey.
+</li>
 </ul>
 
 <h2>SlurmDBD Configuration</h2>
@@ -322,12 +343,7 @@ Define the port on which the database is listening.</li>
 <li><b>StorageType</b>:
 Define the accounting storage mechanism type.
 Acceptable values at present include 
-"accounting_storage/gold", "accounting_storage/mysql", and
-"accounting_storage/pgsql".
-The value "accounting_storage/gold" indicates that account records
-will be written to Gold, which maintains its own database.
-Use of Gold is not recommended due to reduced performance without 
-providing any additional security.
+"accounting_storage/mysql" and "accounting_storage/pgsql".
 The value "accounting_storage/mysql" indicates that accounting records
 should be written to a MySQL database specified by the 
 <i>StorageLoc</i> parameter.
@@ -391,7 +407,7 @@ given time period.</li>
 <p>See the man pages for each command for more information.</p>
 
 <p>Web interfaces with graphical output is currently under
-development and should be available in the Fall of 2008.
+development and should be available in the Fall of 2009.
 A tool to report node state information is also under development.</p>
 
 <h2>Database Configuration</h2>
@@ -446,8 +462,8 @@ with a default account of <i>test</i> execute:</p>
 sacctmgr add user da default=test
 </pre>
 
-<p>If <b>AccountingStorageEnforce=1</b> is configured in the slurm.conf of 
-the cluster <i>snowflake</i> then user <i>da</i> would be
+<p>If <b>AccountingStorageEnforce=associations</b> is configured in
+the slurm.conf of the cluster <i>snowflake</i> then user <i>da</i> would be
 allowed to run in account <i>test</i> and any other accounts added
 in the future.
 Any attempt to use other accounts will result in the job being 
@@ -459,7 +475,6 @@ the job submission command.</p>
 Partition='partitionname' option to specify an association specific to
 a slurm partition.</p>
 
-<!-- For future use
 <h2>Cluster Options</h2>
 
 <p>When either adding or modifying a cluster, these are the options 
@@ -467,48 +482,28 @@ available with sacctmgr:
 <ul>
 <li><b>Name=</b> Cluster name</li>
 
-<li><b>Fairshare=</b> Used for determining priority</li>
-
-<li><b>MaxJobs=</b> Limit number of jobs a user can run in this account</li>
-
-<li><b>MaxNodes=</b>Limit number of nodes a user can allocate in this 
-account</li>
-
-<li><b>MaxWall=</b>Limit wall clock time a job can run</li>
-
-<li><b>MaxCPUSecs=</b> Limit cpu seconds a job can run</li>
 </ul>
-!-->
 
 <h2>Account Options</h2>
 
 <p>When either adding or modifying an account, the following sacctmgr 
 options are available:
 <ul>
-<li><b>Description=</b> Description of the account. (Required on creation)</li>
-
-<li><b>Organization=</b>Organization of the account. (Required on creation)</li>
-
-<li><b>Name=</b> Name of account</li>
-
 <li><b>Cluster=</b> Only add this account to these clusters.
 The account is added to all defined clusters by default.</li>
 
-<li><b>Parent=</b> Make this account a child of this other account.</li>
-
-<!-- For future use
-<li><b>QOS=</b> Quality of Service</li>
-
-<li><b>Fairshare=</b> Used for determining priority</li>
+<li><b>Description=</b> Description of the account. (Default is
+  account name)</li>
 
-<li><b>MaxJobs=</b> Limit number of jobs a user can run in this account</li>
+<li><b>Name=</b> Name of account</li>
 
-<li><b>MaxNodes=</b>Limit number of nodes a user can allocate in this account</li>
+<li><b>Organization=</b>Organization of the account. (Default is
+  parent account unless parent account is root then organization is
+  set to the account name.)</li>
 
-<li><b>MaxWall=</b>Limit wall time a job can run</li>
+<li><b>Parent=</b> Make this account a child of this other account
+  (already added).</li>
 
-<li><b>MaxCPUSecs=</b> Limit cpu seconds a job can run</li>
-!-->
 </ul>
 
 <h2>User Options</h2>
@@ -517,10 +512,7 @@ The account is added to all defined clusters by default.</li>
 options are available:
 
 <ul>
-<li><b>Name=</b> User name</li>
-
-<li><b>DefaultAccount=</b> Default account for the user, used when no account 
-is specified when a job is submitted. (Required on creation)</li>
+<li><b>Account=</b> Account(s) to add user to</li>
 
 <li><b>AdminLevel=</b> This field is used to allow a user to add accounting 
 privileges to this user. Valid options are 
@@ -531,37 +523,117 @@ privileges to this user. Valid options are
 and remove accounts and clusters</li>
 </ul>
 
-<li><b>Account=</b> Account(s) to add user to</li>
-
 <li><b>Cluster=</b> Only add to accounts on these clusters (default is all clusters)</li>
 
-<li><b>Partition=</b> Name of Slurm partition this association applies to</li>
-
-<!-- For future use
-<li><b>QOS=</b> Quality of Service</li>
-
-<li><b>Fairshare=</b> Used for determining priority</li>
+<li><b>DefaultAccount=</b> Default account for the user, used when no account 
+is specified when a job is submitted. (Required on creation)</li>
 
-<li><b>MaxJobs=</b> Limit number of jobs a user can run in this account</li>
+<li><b>DefaultWCKey=</b> Default wckey for the user, used when no wckey 
+is specified when a job is submitted. (Only used when tracking wckeys.)</li>
 
-<li><b>MaxNodes=</b> Limit number of nodes a user can allocate in this account</li>
+<li><b>Name=</b> User name</li>
 
-<li><b>MaxWall=</b> Limit wall time a job can run</li>
+<li><b>Partition=</b> Name of SLURM partition this association applies to</li>
 
-<li><b>MaxCPUSecs=</b> Limit cpu seconds a job can run</li>
-!-->
 </ul>
 
-<!-- For future use
 <h2>Limit enforcement</h2>
 
-<p>When limits are developed they will work in this order...
+<p>When limits are developed they will work in this order.
 If a user has a limit set SLURM will read in those, 
 if not we will refer to the account associated with the job. 
 If the account doesn't have the limit set we will refer to 
 the cluster's limits. 
 If the cluster doesn't have the limit set no limit will be enforced.
-!-->
+<p>All of the above entities can include limits as described below...
+
+<ul>
+
+<li><b>Fairshare=</b> Used for determining priority.  Essentially
+  this is the amount of claim this association and its children have
+  to the above system.</li>
+</li>
+
+<!-- For future use
+<li><b>GrpCPUMins=</b> A hard limit of cpu minutes to be used by jobs
+  running from this association and its children.  If this limit is
+  reached all jobs running in this group will be killed, and no new
+  jobs will be allowed to run.
+</li>
+-->
+
+<!-- For future use
+<li><b>GrpCPUs=</b> The total count of cpus able to be used at any given
+  time from jobs running from this association and its children.  If
+  this limit is reached new jobs will be queued but only allowed to
+  run after resources have been relinquished from this group.
+</li>
+-->
+
+<li><b>GrpJobs=</b> The total number of jobs able to run at any given
+  time from this association and its children.  If
+  this limit is reached new jobs will be queued but only allowed to
+  run after previous jobs complete from this group.
+</li>
+
+<li><b>GrpNodes=</b> The total count of nodes able to be used at any given
+  time from jobs running from this association and its children.  If
+  this limit is reached new jobs will be queued but only allowed to
+  run after resources have been relinquished from this group.
+</li>
+
+<li><b>GrpSubmitJobs=</b> The total number of jobs able to be submitted
+  to the system at any given time from this association and its children.  If
+  this limit is reached new submission requests will be denied until
+  previous jobs complete from this group.
+</li>
+
+<li><b>GrpWall=</b> The maximum wall clock time any job submitted to
+  this group can run for.  If this limit is reached submission requests
+  will be denied. 
+</li>
+
+<!-- For future use
+<li><b>MaxCPUMinsPerJob=</b> A limit of cpu minutes to be used by jobs
+  running from this association.  If this limit is
+  reached the job will be killed.
+</li>
+-->
+
+<!-- For future use
+<li><b>MaxCPUsPerJob=</b> The maximum size in cpus any given job can
+  have from this association.  If this limit is reached the job will
+  be denied at submission.
+</li>
+-->
+
+<li><b>MaxJobs=</b> The total number of jobs able to run at any given
+  time from this association.  If this limit is reached new jobs will
+  be queued but only allowed to run after previous jobs complete from
+  this association.
+</li>
+
+<li><b>MaxNodesPerJob=</b> The maximum size in nodes any given job can
+  have from this association.  If this limit is reached the job will
+  be denied at submission.
+</li>
+ 
+<li><b>MaxSubmitJobs=</b> The maximum number of jobs able to be submitted
+  to the system at any given time from this association.  If
+  this limit is reached new submission requests will be denied until
+  previous jobs complete from this association.
+</li>
+
+<li><b>MaxWallDurationPerJob=</b> The maximum wall clock time any job
+  submitted to this association can run for.  If this limit is reached
+  the job will be denied at submission.
+</li>
+
+<li><b>QOS=</b> comma separated list of QOS's this association is
+  able to run.
+
+</li>
+</ul>
 
 <h2>Modifying Entities</h2>
 
@@ -595,7 +667,7 @@ as deleted.
 If an entity has existed for less than 1 day, the entity will be removed 
 completely. This is meant to clean up after typographic errors.</p>
 
-<p style="text-align: center;">Last modified 27 June 2008</p>
+<p style="text-align: center;">Last modified 2 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
 
diff --git a/doc/html/accounting_storageplugins.shtml b/doc/html/accounting_storageplugins.shtml
new file mode 100644
index 000000000..2423c6955
--- /dev/null
+++ b/doc/html/accounting_storageplugins.shtml
@@ -0,0 +1,885 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">SLURM Accounting Storage Plugin API</a></h1>
+
+<h2> Overview</h2>
+<p> This document describes SLURM Accounting Storage plugins and the API that
+defines them. It is intended as a resource to programmers wishing to write
+their own SLURM Job Accounting Storage plugins. This is version 1 of the API.
+
+<p>SLURM Accounting Storage plugins must conform to the
+SLURM Plugin API with the following specifications:
+
+<p><span class="commandline">const char
+plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
+<p style="margin-left:.2in">
+A free-formatted ASCII text string that identifies the plugin.
+
+<p><span class="commandline">const char
+plugin_type[]="<i>major/minor</i>"</span><br>
+<p style="margin-left:.2in">
+The major type must be &quot;accounting_storage.&quot;
+The minor type can be any suitable name
+for the type of accounting package. We currently use
+<ul>
+<li><b>filetxt</b>&#151;Information written to a text file.
+<li><b>mysql</b>&#151; Store information in a mysql database.
+<li><b>pgsql</b>&#151; Store information in a postgresql database.
+<li><b>none</b>&#151; Information is not stored anywhere.
+</ul>
+<p>The programmer is urged to study 
+<span class="commandline">src/plugins/accounting_storage/mysql</span>
+for a sample implementation of a SLURM Accounting Storage plugin.
+<p> The Accounting Storage plugin was written to be an interface
+to store data collected by the Job Accounting Gather plugin.  When
+adding a new database you may want to add common functions in a common
+file in the src/database dir.  Refer to src/database/mysql_common.c|.h for an
+example so other plugins can also use that database type to write out
+information. 
+<p class="footer"><a href="#top">top</a>
+
+
+<h2>API Functions</h2>
+
+The Job Accounting Storage API uses hooks in the slurmctld.
+
+<p>All of the following functions are required. Functions which are not
+implemented must be stubbed. 
+
+<h4>Functions called by the accounting_storage plugin</h4>
+
+<p class="commandline">void *acct_storage_p_get_connection(bool
+  make_agent, int conn_num, bool rollback, char *location) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+acct_storage_p_get_connection() is called to get a connection to the
+  storage medium. acct_storage_p_close_connection() should be used to
+  free the pointer returned by this function.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">make_agent</span> (input) whether to make an agent
+thread or not.  This is primarily used in the slurmdbd plugin.<br>
+<span class="commandline">conn_num</span> (input) connection number to
+the plugin.  In many cases you should plan on multiple simultaneous
+connections to the plugin.  This number is useful since the debug
+messages can print this out to determine which connection the message
+is from.<br>
+<span class="commandline">rollback</span> (input) Allow rollback to
+happen or not (in use with databases that support rollback).<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">void *</span> which is an opaque structure
+used inside the plugin to connect to the storage type on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">int acct_storage_p_close_connection(void **db_conn) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+acct_storage_p_close_connection() is called at the end of the program that has
+called acct_storage_p_get_connection(); this function closes the connection to
+the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input/output) connection to
+the storage type, all memory will be freed inside this function and
+NULLed out. 
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">int acct_storage_p_commit(void *db_conn, bool commit)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+acct_storage_p_commit() is called at a point where you would either
+  want changes to storage to be committed or rolled back.  This function
+  should also send appropriate update messages to the various slurmctlds.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">commit</span> (input) true for commit, false
+to rollback if connection was set up to rollback. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_add_users(void *db_conn, uint32_t uid, List user_list)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to add users to the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">user_list</span> (input) list of
+acct_user_rec_t *'s containing information about the users being added.<br> 
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_add_coord(void *db_conn, uint32_t uid, List acct_list, acct_user_cond_t *user_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to link specified users to the specified accounts as coordinators.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">acct_list</span> (input) list of
+acct_account_rec_t *'s containing information about the accounts to
+add the coordinators to. <br>
+<span class="commandline">user_cond</span> (input) contains a list of
+users to add as coordinators of the acct_list.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_add_accts(void *db_conn, uint32_t uid, List acct_list)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to add accounts to the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function. <br>
+<span class="commandline">acct_list</span> (input) list of
+acct_account_rec_t *'s containing information about the accounts to add. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_add_clusters(void *db_conn, uint32_t uid, List cluster_list)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to add clusters to the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">cluster_list</span> (input) list of
+acct_cluster_rec_t *'s containing information about the clusters to add. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_add_associations(void *db_conn, uint32_t uid, List association_list)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to add associations to the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">association_list</span> (input) list of
+acct_association_rec_t *'s containing information about the
+associations to add. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_add_qos(void *db_conn, uint32_t uid, List qos_list)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to add QOS' to the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">qos_list</span> (input) list of
+acct_qos_rec_t *'s containing information about the qos to add. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_add_wckeys(void *db_conn, uint32_t uid, List wckey_list)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to add wckeys to the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">wckey_list</span> (input) list of
+acct_wckey_rec_t *'s containing information about the wckeys to add. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
+acct_user_cond_t *user_cond, acct_user_rec_t *user)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to modify existing users in the storage type.  The condition
+  could include very vague information about the user, so this
+  function should be robust in the ability to give everything the user
+  is asking for.  This is the reason a list of modified users is
+  returned so the caller knows what has been changed, sometimes by mistake.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">user_cond</span> (input) conditional about
+which users need to change.  User names or ids should not need to be stated.<br>
+<span class="commandline">user</span> (input) what the changes
+should be on the users identified by the conditional.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of users
+modified on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_modify_accounts(void *db_conn, uint32_t uid,
+acct_account_cond_t *acct_cond, acct_account_rec_t *acct)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to modify existing accounts in the storage type.  The condition
+  could include very vague information about the account, so this
+  function should be robust in the ability to give everything the account
+  is asking for.  This is the reason a list of modified accounts is
+  returned so the caller knows what has been changed, sometimes by mistake.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">acct_cond</span> (input) conditional about
+which accounts need to change.  Account names should not need to be stated.<br>
+<span class="commandline">acct</span> (input) what the changes
+should be on the accounts identified by the conditional.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of users
+modified on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
+acct_cluster_cond_t *cluster_cond, acct_cluster_rec_t *cluster)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to modify existing clusters in the storage type.  The condition
+  could include very vague information about the cluster, so this
+  function should be robust in the ability to give everything the cluster
+  is asking for.  This is the reason a list of modified clusters is
+  returned so the caller knows what has been changed, sometimes by mistake.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">cluster_cond</span> (input) conditional about
+which clusters need to change.  Cluster names should not need to be stated.<br>
+<span class="commandline">cluster</span> (input) what the changes
+should be on the clusters identified by the conditional.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of clusters
+modified on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_modify_associations(void *db_conn, uint32_t uid,
+acct_association_cond_t *assoc_cond, acct_association_rec_t *assoc)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to modify existing associations in the storage type.  The condition
+  could include very vague information about the association, so this
+  function should be robust in the ability to give everything the association
+  is asking for.  This is the reason a list of modified associations is
+  returned so the caller knows what has been changed, sometimes by mistake.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">assoc_cond</span> (input) conditional about
+which associations need to change.  Association ids should not need to be stated.<br>
+<span class="commandline">assoc</span> (input) what the changes
+should be on the associations identified by the conditional.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of associations
+modified on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_modify_qos(void *db_conn, uint32_t uid,
+acct_qos_cond_t *qos_cond, acct_qos_rec_t *qos)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to modify existing qos in the storage type.  The condition
+  could include very vague information about the qos, so this
+  function should be robust in the ability to give everything the qos
+  is asking for.  This is the reason a list of modified qos is
+  returned so the caller knows what has been changed, sometimes by mistake.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">qos_cond</span> (input) conditional about
+which qos need to change.  Qos names should not need to be stated.<br>
+<span class="commandline">qos</span> (input) what the changes
+should be on the qos identified by the conditional.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of qos
+modified on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_modify_wckeys(void *db_conn, uint32_t uid,
+acct_wckey_cond_t *wckey_cond, acct_wckey_rec_t *wckey)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to modify existing wckeys in the storage type.  The condition
+  could include very vague information about the wckeys, so this
+  function should be robust in the ability to give everything the wckey
+  is asking for.  This is the reason a list of modified wckey is
+  returned so the caller knows what has been changed, sometimes by mistake.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">wckey_cond</span> (input) conditional about
+which wckeys need to change.  Wckey names should not need to be stated.<br>
+<span class="commandline">wckey</span> (input) what the changes
+should be on the wckey identified by the conditional.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of wckeys
+modified on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
+acct_user_cond_t *user_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to remove users from the storage type.  This will remove all
+  associations.  Must check to make sure all running jobs are finished
+  before this is allowed to execute.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">user_cond</span> (input) conditional about
+which users to be removed.  User names or ids should not need to be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of users
+removed on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_remove_coord(void *db_conn, uint32_t uid, 
+List acct_list, acct_user_cond_t *user_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to remove coordinators from the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">acct_list</span> (input) list of accounts
+associated with the users.<br> 
+<span class="commandline">user_cond</span> (input) conditional about
+which users to be removed as coordinators.  User names or ids should be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of users
+removed as coordinators on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_remove_accounts(void *db_conn, uint32_t uid, 
+acct_account_cond_t *acct_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to remove accounts from the storage type. This will remove all
+  associations from these accounts.  You need to make sure no jobs are
+  running with any association that is to be removed.  If any of these
+  accounts are the default account for any user, that default must be
+  changed before the account can be removed.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">acct_cond</span> (input) conditional about
+which accounts to be removed.  Account names should not need to be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of accounts
+removed on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid, 
+acct_cluster_cond_t *cluster_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to remove clusters from the storage type. This will remove all
+  associations from these clusters.  You need to make sure no jobs are
+  running with any association that is to be removed. 
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">cluster_cond</span> (input) conditional about
+which clusters to be removed.  Cluster names should not need to be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of clusters
+removed on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_remove_associations(void *db_conn, uint32_t uid,
+acct_association_cond_t *assoc_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to remove associations from the storage type.  You need to make
+  sure no jobs are running with any association that is to be removed. 
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">assoc_cond</span> (input) conditional about
+which associations to be removed.  Association ids should not need to be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of associations
+removed on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_remove_qos(void *db_conn, uint32_t uid,
+acct_qos_cond_t *qos_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to remove qos from the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">qos_cond</span> (input) conditional about
+which qos to be removed.  Qos names should not need to be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of qos
+removed on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_remove_wckeys(void *db_conn, uint32_t uid,
+acct_wckey_cond_t *wckey_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to remove wckeys from the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">wckey_cond</span> (input) conditional about
+which wckeys to be removed.  Wckey names should not need to be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing names of wckeys
+removed on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_get_users(void *db_conn, uint32_t uid,
+acct_user_cond_t *user_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get a list of acct_user_rec_t *'s based on the conditional sent.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">user_cond</span> (input) conditional about
+which users are to be returned.  User names or ids should not need to
+be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing acct_user_rec_t *'s
+on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_get_accts(void *db_conn, uint32_t uid,
+acct_account_cond_t *acct_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get a list of acct_account_rec_t *'s based on the conditional sent.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">acct_cond</span> (input) conditional about
+which accounts are to be returned.  Account names should not need to
+be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing acct_account_rec_t *'s
+on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_get_clusters(void *db_conn, uint32_t uid,
+acct_cluster_cond_t *cluster_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get a list of acct_cluster_rec_t *'s based on the conditional sent.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">cluster_cond</span> (input) conditional about
+which clusters are to be returned.  Cluster names should not need to
+be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing acct_cluster_rec_t *'s
+on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_get_associations(void *db_conn, uint32_t uid,
+acct_association_cond_t *assoc_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get a list of acct_association_rec_t *'s based on the conditional sent.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">assoc_cond</span> (input) conditional about
+which associations are to be returned.  Association ids should not need to
+be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing acct_association_rec_t *'s
+on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_get_qos(void *db_conn, uint32_t uid,
+acct_qos_cond_t *qos_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get a list of acct_qos_rec_t *'s based on the conditional sent.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">qos_cond</span> (input) conditional about
+which qos are to be returned.  Qos names should not need to
+be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing acct_qos_rec_t *'s
+on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_get_wckeys(void *db_conn, uint32_t uid,
+acct_wckey_cond_t *wckey_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get a list of acct_wckey_rec_t *'s based on the conditional sent.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">wckey_cond</span> (input) conditional about
+which wckeys are to be returned.  Wckey names should not need to
+be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing acct_wckey_rec_t *'s
+on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+List acct_storage_p_get_txn(void *db_conn, uint32_t uid,
+acct_txn_cond_t *txn_cond)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get a list of acct_txn_rec_t *'s (transactions) based on the conditional sent.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">txn_cond</span> (input) conditional about
+which transactions are to be returned.  Transaction ids should not need to
+be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List</span> containing acct_txn_rec_t *'s
+on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_get_usage(void *db_conn, uint32_t uid, void *in, int type,
+time_t start, time_t end)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get usage for a specific association or wckey.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">in</span> (input/output) can be anything that
+gathers usage like acct_association_rec_t * or acct_wckey_rec_t *.<br>
+<span class="commandline">type</span> (input) really a
+slurmdbd_msg_type_t; this lets the plugin know what type of structure
+was sent in.<br>
+<span class="commandline">start</span> (input) start time of the usage.<br> 
+<span class="commandline">end</span> (input) end time of the usage.<br> 
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_roll_usage(void *db_conn, time_t sent_start)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+roll up association, cluster, and wckey usage in the storage.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">sent_start</span> (input) start time of the rollup.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int clusteracct_storage_p_node_down(void *db_conn, char *cluster,
+struct node_record *node_ptr, time_t event_time, char *reason)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Mark nodes down in the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">cluster</span> (input) name of cluster node
+is on.<br>
+<span class="commandline">node_ptr</span> (input) pointer to the node
+structure marked down.<br>
+<span class="commandline">event_time</span> (input) time event happened.<br> 
+<span class="commandline">reason</span> (input) if different from what
+is set in the node_ptr, the reason the node is down.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int clusteracct_storage_p_node_up(void *db_conn, char *cluster,
+struct node_record *node_ptr, time_t event_time)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Mark nodes up in the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">cluster</span> (input) name of cluster node
+is on.<br>
+<span class="commandline">node_ptr</span> (input) pointer to the node
+structure marked up.<br>
+<span class="commandline">event_time</span> (input) time event happened.<br> 
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int clusteracct_storage_p_cluster_procs(void *db_conn, char *cluster,
+uint32_t procs, time_t event_time)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Update storage type with the current number of processors on a given cluster.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">cluster</span> (input) name of cluster.<br>
+<span class="commandline">procs</span> (input) number of processors on
+system.<br>
+<span class="commandline">event_time</span> (input) time event happened.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int clusteracct_storage_p_get_usage(void *db_conn, uint32_t uid, void
+*cluster_rec, int type, time_t start, time_t end)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get usage for a specific cluster.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">uid</span> (input) uid of user calling the
+function.<br> 
+<span class="commandline">cluster_rec</span> (input/output)
+acct_cluster_rec_t * already set with the cluster name.  Usage will be
+filled in.<br> 
+<span class="commandline">type</span> (input) really a
+slurmdbd_msg_type_t; lets the plugin know what kind of structure
+was sent in.  For this function it is just DBD_GET_CLUSTER_USAGE.<br> 
+<span class="commandline">start</span> (input) start time of the usage.<br> 
+<span class="commandline">end</span> (input) end time of the usage.<br> 
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. 
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int clusteracct_storage_p_register_ctld(void *db_conn, char *cluster,
+uint16_t port)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used when a controller is turned on to tell the storage type where the
+  slurmctld for a given cluster is located.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">cluster</span> (input) name of cluster.<br>
+<span class="commandline">port</span> (input) port the cluster's
+slurmctld is listening on; the host is obtained from the connection.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int jobacct_storage_p_job_start(void *db_conn, struct job_record *job_ptr) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+jobacct_storage_p_job_start() is called in the jobacct plugin when a
+job starts, inserting information into the database about the new job.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">job_ptr</span> (input) information about the job in 
+slurmctld.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int jobacct_storage_p_job_complete(void *db_conn, struct job_record *job_ptr) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+jobacct_storage_p_job_complete() is called in the jobacct plugin when
+a job completes, this updates info about end of a job.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">job_ptr</span> (input) information about the job in 
+slurmctld.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int jobacct_storage_p_step_start(void *db_conn, struct step_record *step_ptr) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+jobacct_storage_p_step_start() is called in the jobacct plugin at the
+allocation of a new step in the slurmctld, this inserts info about the
+beginning of a step.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">step_ptr</span> (input) information about the step in
+slurmctld.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int jobacct_storage_p_step_complete(void *db_conn, struct step_record *step_ptr) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+jobacct_storage_p_step_complete() is called in the jobacct plugin at
+the end of a step in the slurmctld, this updates the ending
+information about a step.
+<p style="margin-left:.2in"><b>Arguments</b>:<br> 
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">step_ptr</span> (input) information about the step in
+slurmctld.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int jobacct_storage_p_job_suspend(void *db_conn, struct job_record *job_ptr) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+jobacct_storage_p_suspend() is called in the jobacct plugin when a
+job is suspended or resumed in the slurmctld, this updates the
+database about the suspended time of the job. 
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">job_ptr</span> (input) information about the job in 
+slurmctld.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">none</span>
+
+<p class="commandline">
+List jobacct_storage_p_get_jobs_cond(void *db_conn, uint32_t uid,
+acct_job_cond_t *job_cond) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+jobacct_storage_p_get_jobs_cond() is called to get a list of jobs from the
+database given the conditional. 
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">uid</span> (input) uid of user calling the function.<br> 
+<span class="commandline">job_cond</span> (input) conditional about
+which jobs to get.  Job ids should not need to be stated.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">List of job_rec_t's</span> on success, or<br>
+<span class="commandline">NULL</span> on failure.
+
+<p class="commandline">
+int jobacct_storage_p_archive(void *db_conn, acct_archive_cond_t *arch_cond) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+used to archive old data.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">arch_cond</span> (input) conditional about
+what to archive.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int jobacct_storage_p_archive_load(void *db_conn, acct_archive_rect *arch_rec) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+used to load old archive data.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">arch_rec</span> (input) information about
+what to load.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_update_shares_used(void *db_conn, List acct_list)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used to update shares used in the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">acct_list</span> (input) List of shares_used_object_t.<br> 
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="commandline">
+int acct_storage_p_flush_jobs_on_cluster(void *db_conn, char *cluster, time_t event_time) 
+<p style="margin-left:.2in"><b>Description</b>:<br>
+used to mark all jobs in the storage type as finished.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">cluster</span> (input) name of cluster to
+apply end to.<br>
+<span class="commandline">event_time</span> (input) when the flush happened.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
+<p class="footer"><a href="#top">top</a>
+
+
+<h2>Parameters</h2>
+<p>These parameters can be used in the slurm.conf to set up
+connections to the database; all have defaults based on the plugin type
+used.
+<dl>
+<dt><span class="commandline">AccountingStorageType</span>
+<dd>Specifies which plugin should be used.
+<dt><span class="commandline">AccountingStorageLoc</span>
+<dd>Let the plugin know the name of the logfile/database to use.
+<dt><span class="commandline">AccountingStorageHost</span>
+<dd>Let the plugin know the host where the database is.
+<dt><span class="commandline">AccountingStoragePort</span>
+<dd>Let the plugin know the port to connect to.
+<dt><span class="commandline">AccountingStorageUser</span>
+<dd>Let the plugin know the name of the user to connect to the
+database with.
+<dt><span class="commandline">AccountingStoragePass</span>
+<dd>Let the plugin know the password of the user connecting to the database.
+<dt><span class="commandline">AccountingStorageEnforce</span>
+<dd>Specifies if we should enforce certain things be in existence
+  before allowing job submissions and such.  Valid options are
+  "associations", "limits", and "wckeys". You can use any combination of
+  those listed.
+</dl>
+
+<h2>Versioning</h2>
+<p> This document describes version 1 of the SLURM Accounting Storage API. Future 
+releases of SLURM may revise this API. An Accounting Storage plugin conveys its
+ability to implement a particular API version using the mechanism outlined
+for SLURM plugins.
+<p class="footer"><a href="#top">top</a>
+
+<p style="text-align:center;">Last modified 10 February 2009</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index 0136108ee..799cbde1a 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -44,10 +44,12 @@ Seven new sbatch options are available:
 <i>--geometry</i> (specify job size in each dimension),
 <i>--no-rotate</i> (disable rotation of geometry), 
 <i>--conn-type</i> (specify interconnect type between base partitions, mesh or torus).
-<i>--blrts-image</i> (specify alternative blrts image for bluegene block.  Default if not set).
-<i>--linux-image</i> (specify alternative linux image for bluegene block.  Default if not set).
+<i>--blrts-image</i> (specify alternative blrts image for bluegene --block.  Default if not set, BGL only.)
+<i>--cnload-image</i> (specify alternative c-node image for bluegene block.  Default if not set, BGP only.)
+<i>--ioload-image</i> (specify alternative io image for bluegene block.  Default if not set, BGP only.)
+<i>--linux-image</i> (specify alternative linux image for bluegene block.  Default if not set, BGL only.)
 <i>--mloader-image</i> (specify alternative mloader image for bluegene block.  Default if not set).
-<i>--ramdisk-image</i> (specify alternative ramdisk image for bluegene block.  Default if not set).
+<i>--ramdisk-image</i> (specify alternative ramdisk image for bluegene block.  Default if not set, BGL only.)
 The <i>--nodes</i> option with a minimum and (optionally) maximum node count continues 
 to be available.  
 
@@ -193,16 +195,13 @@ keys scroll the window containing the text information.</p>
 
 <h2>System Administration</h2>
 
-<p>As of IBM's REV 2 driver SLURM must be built in 64-bit mode.  
-This can be done by specifying <b>CFLAGS=-m64</b>.  
-CFLAGS must be set for SLURM to compile correctly.</p>
-
 <p>Building a BlueGene compatible system is dependent upon the 
 <i>configure</i> program locating some expected files. 
-In particular, the configure script searches for <i>libdb2.so</i> in the 
-directories <i>/home/bgdb2cli/sqllib</i> and <i>/u/bgdb2cli/sqllib</i>.
-If your DB2 library file is in a different location, use the configure 
-option <i>--with-db2-dir=PATH</i> to specify the parent directory.
+In particular for a BlueGene/L system, the configure script searches
+for <i>libdb2.so</i> in the directories <i>/home/bgdb2cli/sqllib</i>
+and <i>/u/bgdb2cli/sqllib</i>.  If your DB2 library file is in a
+different location, use the configure
+option <i>--with-db2-dir=PATH</i> to specify the parent directory. 
 If you have the same version of the operating system on both the 
 Service Node (SN) and the Front End Nodes (FEN) then you can configure 
 and build one set of files on the SN and install them on both the SN and FEN.
@@ -238,7 +237,6 @@ This is because there are no job steps and we don't want to purge jobs premature
 The value of <i>SelectType</i> must be set to "select/bluegene" in order to have 
 node selection performed using a system aware of the system's topography 
 and interfaces. 
-The value of <i>SchedulerType</i> should be set to "sched/builtin".
 The value of <i>Prolog</i> should be set to the full pathname of a program that 
 will delay execution until the bgblock identified by the MPIRUN_PARTITION 
 environment variable is ready for use. It is recommended that you construct a script 
@@ -420,7 +418,7 @@ systems).
 the user.
 If you change the bgblock layout, then slurmctld and slurmd should 
 both be cold-started (e.g. <b>/etc/init.d/slurm startclean</b>).
-If you which to modify the <i>Numpsets</i> values 
+If you wish to modify the <i>Numpsets</i> values 
 for existing bgblocks, either modify them manually or destroy the bgblocks
 and let SLURM recreate them. 
 Note that in addition to the bgblocks defined in <i>bluegene.conf</i>, an 
@@ -435,7 +433,7 @@ A sample <i>bluegene.conf</i> file is shown below.
 #
 # BlrtsImage:           BlrtsImage used for creation of all bgblocks.
 # LinuxImage:           LinuxImage used for creation of all bgblocks.
-# tMloaderImage:         MloaderImage used for creation of all bgblocks.
+# MloaderImage:         MloaderImage used for creation of all bgblocks.
 # RamDiskImage:         RamDiskImage used for creation of all bgblocks.
 #
 # You may add extra images which a user can specify from the srun 
@@ -527,17 +525,18 @@ BridgeAPIVerbose=0
 # BPs=[000x001] Type=TORUS       # 1x1x2 = 2 midplanes
 ###############################################################################
 # volume = 1x1x1 = 1
-BPs=[000x000] Type=TORUS                         # 1x1x1 =  1 midplane
-BPs=[001x001] Type=SMALL NodeCards=4 Quarters=3  # 1x1x1 = 4-Nodecard sized 
-                                                 # cnode blocks 3-Base 
-                                                 # Partition Quarter sized 
-                                                 # c-node blocks
+BPs=[000x000] Type=TORUS                            # 1x1x1 =  1 midplane
+BPs=[001x001] Type=SMALL 32CNBlocks=4 128CNBlocks=3 # 1x1x1 = 4-Nodecard sized 
+                                                    # cnode blocks 3-Base 
+                                                    # Partition Quarter sized 
+                                                    # c-node blocks
 
 </pre></p>
 
 <p>The above <i>bluegene.conf</i> file defines multiple bgblocks to be 
 created in a single midplane (see the "SMALL" option). 
-Using this mechanism, up to 32 independent jobs can be executed 
+Using this mechanism, up to 32 independent jobs, each consisting of
+  32 c-nodes, can be executed 
 simultaneously on a one-rack BlueGene system.
 If defining bgblocks of <i>Type=SMALL</i>, the SLURM partition 
 containing them as defined in <i>slurm.conf</i> must have the 
@@ -549,6 +548,10 @@ scheduler performance.
 As in all SLURM configuration files, parameters and values 
 are case insensitive.</p>
 
+<p> With a BlueGene/P system the image names are different.  The
+  correct image names are CnloadImage, MloaderImage, and IoloadImage.
+  You can also use alternate images just the same as described above.
+
 <p>One more thing is required to support SLURM interactions with 
 the DB2 database (at least as of the time this was written).
 DB2 database access is required by the slurmctld daemon only.
@@ -624,10 +627,15 @@ daemon serving as a front-end to those base partitions is not responding (on
 non-BlueGene systems, the slurmd actually does run on the compute 
 nodes, so the message is more meaningful there). </p>
 
-<p>Note that you can emulate a BlueGene system on stand-alone Linux 
+<p>Note that you can emulate a BlueGene/L system on stand-alone Linux 
 system. 
-Run <b>configure</b> with the <b>--enable-bluegene-emulation</b> option.
-This will define "HAVE_BG" and "HAVE_FRONT_END" in the config.h file. 
+Run <b>configure</b> with the <b>--enable-bgl-emulation</b> option.
+This will define "HAVE_BG", "HAVE_BGL", and "HAVE_FRONT_END" in the
+config.h file. 
+You can also emulate a BlueGene/P system with
+  the <b>--enable-bgp-emulation</b> option. 
+This will define "HAVE_BG", "HAVE_BGP", and "HAVE_FRONT_END" in the
+config.h file. 
 Then execute <b>make</b> normally. 
 These variables will build the code as if it were running 
 on an actual BlueGene computer, but avoid making calls to the
@@ -637,6 +645,6 @@ scheduling logic, etc. </p>
  
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 9 September 2008</p>
+<p style="text-align:center;">Last modified 17 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/documentation.shtml b/doc/html/documentation.shtml
index 9136821b4..057095390 100644
--- a/doc/html/documentation.shtml
+++ b/doc/html/documentation.shtml
@@ -40,7 +40,7 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 <li><a href="authplugins.shtml">Authentication Plugin Programmer Guide</a></li>
 <li><a href="crypto_plugins.shtml">Cryptographic Plugin Programmer Guild</a></li>
 <li><a href="jobacct_gatherplugins.shtml">Job Accounting Gather Plugin Programmer Guide</a></li>
-<li><a href="jobacct_storageplugins.shtml">Job Accounting Storage Plugin Programmer Guide</a></li>
+<li><a href="accounting_storageplugins.shtml">Accounting Storage Plugin Programmer Guide</a></li>
 <li><a href="checkpoint_plugins.shtml">Job Checkpoint Plugin Programmer Guide</a></li>
 <li><a href="jobcompplugins.shtml">Job Completion Logging Plugin Programmer Guide</a></li>
 <li><a href="mpiplugins.shtml">MPI Plugin Programmer Guide</a></li>
diff --git a/doc/html/jobacct_storageplugins.shtml b/doc/html/jobacct_storageplugins.shtml
deleted file mode 100644
index 107c16221..000000000
--- a/doc/html/jobacct_storageplugins.shtml
+++ /dev/null
@@ -1,204 +0,0 @@
-<!--#include virtual="header.txt"-->
-
-<h1><a name="top">SLURM Job Accounting Storage Plugin API</a></h1>
-
-<h2> Overview</h2>
-<p> This document describes SLURM Job Accounting Storage plugins and the API that
-defines them. It is intended as a resource to programmers wishing to write
-their own SLURM Job Accounting Storage plugins. This is version 1 of the API.
-
-<p>SLURM Job Accounting Storage plugins must conform to the
-SLURM Plugin API with the following specifications:
-
-<p><span class="commandline">const char
-plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"
-<p style="margin-left:.2in">
-A free-formatted ASCII text string that identifies the plugin.
-
-<p><span class="commandline">const char
-plugin_type[]="<i>major/minor</i>"</span><br>
-<p style="margin-left:.2in">
-The major type must be &quot;jobacct_storage.&quot;
-The minor type can be any suitable name
-for the type of accounting package. We currently use
-<ul>
-<li><b>filetxt</b>&#151;Information written to a text file.
-<li><b>mysql</b>&#151; Store information in a mysql database.
-<li><b>pgsql</b>&#151; Store information in a postgresql database.
-<li><b>none</b>&#151; Information is not stored anywhere.
-</ul>
-<p>The programmer is urged to study 
-<span class="commandline">src/plugins/jobacct_storage/mysql</span>
-for a sample implementation of a SLURM Job Accounting Storage plugin.
-<p> The Job Accounting Storage plugin was written to be a interface
-to storage data collected by the Job Accounting Gather plugin.  When
-adding a new database you may want to add common functions in a common
-file in the src/common dir.  Refer to src/common/mysql_common.c/.h for an
-example so other plugins can also use that database type to write out
-information. 
-<p class="footer"><a href="#top">top</a>
-
-
-<h2>API Functions</h2>
-
-The Job Accounting Storage API uses hooks in the slurmctld.
-
-<p>All of the following functions are required. Functions which are not
-implemented must be stubbed. 
-
-<h4>Functions called by the jobacct_storage plugin</h4>
-
-<p class="commandline">int jobacct_storage_p_init(char *location) 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_storage_p_init() is called to initiate a connection to the
-database server and check the state of the database table to make sure
-they are in sync with the table definitions in the plugin.  
-Put global initialization here. Or open file or anything to initialize
-the plugin.
-<p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">location</span> (input) database name or log
-file location.
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
-<span class="commandline">SLURM_ERROR</span> on failure.
-
-<p class="commandline">int jobacct_storage_p_fini() 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_storage_p_fini() is called at the end of the program that has
-called jobacct_storage_p_init this function closes the connection to
-the database or logfile.
-<p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">none</span>
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
-<span class="commandline">SLURM_ERROR</span> on failure.
-
-<p class="commandline">
-int jobacct_storage_p_job_start(struct job_record *job_ptr) 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_storage_p_job_start() is called in the jobacct plugin when a
-job starts, inserting information into the database about the new job.
-<p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">job_ptr</span> (input) information about the job in 
-slurmctld.
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
-<span class="commandline">SLURM_ERROR</span> on failure.
-
-<p class="commandline">
-int jobacct_storage_p_job_complete(struct job_record *job_ptr) 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_storage_p_job_complete() is called in the jobacct plugin when
-a job completes, this updates info about end of a job.
-<p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">job_ptr</span> (input) information about the job in 
-slurmctld.
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
-<span class="commandline">SLURM_ERROR</span> on failure.
-
-<p class="commandline">
-int jobacct_storage_p_step_start(struct step_record *step_ptr) 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_storage_p_step_start() is called in the jobacct plugin at the
-allocation of a new step in the slurmctld, this inserts info about the
-beginning of a step.
-<p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">step_ptr</span> (input) information about the step in
-slurmctld.
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
-<span class="commandline">SLURM_ERROR</span> on failure.
-
-<p class="commandline">
-int jobacct_storage_p_step_complete(struct step_record *step_ptr) 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_storage_p_step_complete() is called in the jobacct plugin at
-the end of a step in the slurmctld, this updates the ending
-information about a step.
-<p style="margin-left:.2in"><b>Arguments</b>:<br> 
-<span class="commandline">step_ptr</span> (input) information about the step in
-slurmctld.
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
-<span class="commandline">SLURM_ERROR</span> on failure.
-
-<p class="commandline">
-int jobacct_storage_p_suspend(struct job_record *job_ptr) 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_storage_p_suspend() is called in the jobacct plugin when a
-job is suspended or resumed in the slurmctld, this updates the
-database about the suspended time of the job. 
-<p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">job_ptr</span> (input) information about the job in 
-slurmctld.
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">none</span>
-
-<p class="commandline">
-void jobacct_storage_p_get_jobs(List job_list, List selected_steps,
-List selected_parts, void *params) 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-jobacct_storage_p_get_jobs() is called to get a list of jobs from the
-database given the specific inputs. 
-<p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">List job_list </span> (input/output) list to
-be filled with jobacct_job_rec_t.<br>
-<span class="commandline">List selected_steps </span>
-(input) list containing type jobacct_select_step_t to query against.<br>
-<span class="commandline">List selected_parts </span>
-(input) list containing char *'s of names of partitions to query against.<br>
-<span class="commandline">void *params </span>
-(input) to be cast as sacct_parameters_t in the plugin.
-
-<p style="margin-left:.2in">jobacct_job_rec_t, jobacct_select_step_t,
-and sacct_parameters_t are
-all defined in common/slurm_jobacct.h
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">none</span> 
-
-<p class="commandline">
-void jobacct_storage_p_archive(List selected_parts, void *params) 
-<p style="margin-left:.2in"><b>Description</b>:<br>
-database_p_jobcomp_archive() used to archive old data.
-<p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">List selected_parts </span>
-(input) list containing char *'s of names of partitions to query against.<br>
-<span class="commandline">void *params </span>
-(input) to be cast as sacct_parameters_t in the plugin.
-<p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">none</span> 
-
-<p class="footer"><a href="#top">top</a>
-
-
-<h2>Parameters</h2>
-<p>These parameters can be used in the slurm.conf to set up
-connections to the database all have defaults based on the plugin type
-used.
-<dl>
-<dt><span class="commandline">JobAcctStorageType</span>
-<dd>Specifies which plugin should be used.
-<dt><span class="commandline">JobAcctStorageLoc</span>
-<dd>Let the plugin the name of the logfile/database name to use.
-<dt><span class="commandline">JobAcctStorageHost</span>
-<dd>Let the plugin know the host where the database is.
-<dt><span class="commandline">JobAcctStoragePort</span>
-<dd>Let the plugin know the port to connect to.
-<dt><span class="commandline">JobAcctStorageUser</span>
-<dd>Let the plugin know the name of the user to connect to the
-database with.
-<dt><span class="commandline">JobAcctStoragePass</span>
-<dd>Let the plugin know the password of the user connecting to the database.
-</dl>
-
-<h2>Versioning</h2>
-<p> This document describes version 1 of the SLURM Job Accounting Storage API. Future 
-releases of SLURM may revise this API. A Job Accounting Storage plugin conveys its
-ability to implement a particular API version using the mechanism outlined
-for SLURM plugins.
-<p class="footer"><a href="#top">top</a>
-
-<p style="text-align:center;">Last modified 23 May 2007</p>
-
-<!--#include virtual="footer.txt"-->
diff --git a/doc/html/overview.shtml b/doc/html/overview.shtml
index 5184d8464..06c2270e5 100644
--- a/doc/html/overview.shtml
+++ b/doc/html/overview.shtml
@@ -54,9 +54,8 @@ building block approach. These plugins presently include:
 <li><a href="crypto_plugins.html">Cryptography</a>: Munge or OpenSSL</li>
 <li><a href="jobacct_gatherplugins.html">Job Accounting Gather</a>: AIX, Linux, or none(default)</li>
 
-<li><a href="jobacct_storageplugins.html">Job Accounting Storage</a>: 
+<li><a href="accounting_storageplugins.html">Accounting Storage</a>: 
 text file (default if jobacct_gather != none), 
-<a href="http://www.clusterresources.com/pages/products/gold-allocation-manager.php">Gold</a>
 MySQL, PGSQL, SlurmDBD (Slurm Database Daemon) or none</li>
 
 <li><a href="jobcompplugins.html">Job completion logging</a>: 
@@ -67,7 +66,7 @@ MPICH-GM, MPICH-MX, MVAPICH, OpenMPI and none (default, for most
 other versions of MPI including MPICH2 and MVAPICH2).</li>
 
 <li><a href="selectplugins.html">Node selection</a>: 
-Blue Gene (a 3-D torus interconnect), 
+Bluegene (a 3-D torus interconnect, BGL or BGP), 
 <a href="cons_res.html">consumable resources</a> (to allocate 
 individual processors and memory) or linear (to dedicate entire nodes).</li>
 
diff --git a/doc/html/programmer_guide.shtml b/doc/html/programmer_guide.shtml
index 74d13cfd2..50a402d14 100644
--- a/doc/html/programmer_guide.shtml
+++ b/doc/html/programmer_guide.shtml
@@ -95,11 +95,11 @@ SLURM.<br>
 <b>plugins</b>&#151;Plugin functions for various infrastructure. A separate 
 subdirectory is used for each plugin class:<br> 
 <ul>
+<li><b>accounting_storage</b> for specifying the type of storage for accounting,<br>
 <li><b>auth</b> for user authentication,<br> 
 <li><b>checkpoint</b> for system-initiated checkpoint and restart of user jobs,<br>
 <li><b>crypto</b> for cryptographic functions,<br>
 <li><b>jobacct_gather</b> for job accounting,<br>
-<li><b>jobacct_storage</b> for specifing the type of storage for job accounting,<br>
 <li><b>jobcomp</b> for job completion logging,<br>
 <li><b>mpi</b> for MPI support,<br>
 <li><b>proctrack</b> for process tracking,<br>
@@ -109,6 +109,8 @@ subdirectory is used for each plugin class:<br>
 <li><b>task</b> for task affinity to processors.<br>
 </ul>
 <p style="margin-left:.2in">
+<b>sacct</b>&#151;User command to view accounting information about jobs.<br>
+<b>sacctmgr</b>&#151;User and administrator tool to manage accounting.<br>
 <b>salloc</b>&#151;User command to allocate resources for a job.<br>
 <b>sattach</b>&#151;User command to attach standard input, output and error 
 files to a running job or job step.<br>
@@ -122,12 +124,15 @@ with an existing SLURM job.<br>
 <b>slurmd</b>&#151;SLURM daemon code to manage the compute server nodes including 
 the execution of user applications.<br>
 <b>smap</b>&#151;User command to view layout of nodes, partitions, and jobs.
-This is particularly valuable on systems like Blue Gene, which has a three
+This is particularly valuable on systems like Bluegene, which has a three
 dimension torus topography.<br>
 <b>squeue</b>&#151;User command to get information on SLURM jobs and job steps.<br>
+<b>sreport</b>&#151;User command to view various reports about past
+usage across the enterprise.<br>
 <b>srun</b>&#151;User command to submit a job, get an allocation, and/or 
 initiation a parallel job step.<br>
-<b>strigger</b>&#151;User and administrator to manage event triggers.<br>
+<b>sstat</b>&#151;User tool to status running jobs.<br>
+<b>strigger</b>&#151;User and administrator tool to manage event triggers.<br>
 <b>sview</b>&#151;User command to view and update node, partition, and job 
 job state information.<br>
 
diff --git a/doc/html/review_release.html b/doc/html/review_release.html
index 3755b3835..607be0949 100644
--- a/doc/html/review_release.html
+++ b/doc/html/review_release.html
@@ -9,6 +9,7 @@
 <b>NOTE: Do not follow links.</b>
 <ul>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/accounting.html">accounting.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/accounting_storageplugins.html">accounting_storageplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/api.html">api.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/authplugins.html">authplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/big_sys.html">big_sys.html</a></li>
@@ -25,7 +26,6 @@
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/help.html">help.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/ibm.html">ibm.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/jobacct_gatherplugins.html">jobacct_gatherplugins.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/jobacct_storageplugins.html">jobacct_storageplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/jobcompplugins.html">jobcompplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/mail.html">mail.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/maui.html">maui.html</a></li>
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index 403a9c3c8..9231938dc 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -61,13 +61,14 @@ Networking, Italy)</li>
 <li>Federico Sacerdoti (D.E. Shaw)<li>
 <li>Jeff Squyres (LAM MPI)</li>
 <li>Prashanth Tamraparni (HP, India)</li>
-<li>Adam Todorski (Rensselaer Polytechnic Institute)</li>
+<li>Jimmy Tang (Trinity College, Ireland)</li>
 <li>Kevin Tew (LLNL/Bringham Young University)</li>
+<li>Adam Todorski (Rensselaer Polytechnic Institute)</li>
 <li>Tim Wickberg (Rensselaer Polytechnic Institute)</li>
 <li>Jay Windley (Linux NetworX)</li>
 <li>Anne-Marie Wunderlin (Bull)</li>
 </ul>
 
-<p style="text-align:center;">Last modified 1 November 2008</p>
+<p style="text-align:center;">Last modified 20 January 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 69050e2f9..035861fd1 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -24,7 +24,7 @@ The
 command displays information on jobs, job steps, status, and exitcodes by 
 default.
 You can tailor the output with the use of the 
-\f3\-\-fields=\fP 
+\f3\-\-format=\fP 
 option to specify the fields to be shown.
 .PP
 For the root user, the 
@@ -51,6 +51,17 @@ gather and report incomplete information for these calls;
 \f3sacct\fP reports values of 0 for this missing data. See your systems
 \f2getrusage(3)\fP man page for information about which data are
 actually available on your system.
+.IP
+If --dump is specified, the field selection options (--brief,
+--format, ...) have no effect. 
+.IP
+Elapsed time fields are presented as 2 fields, integral seconds and integral microseconds
+.IP
+If --dump is not specified, elapsed time fields are presented as
+[[days-]hours:]minutes:seconds.hundredths.
+.IP
+The default input file is the file named in the jobacct_logfile
+parameter in slurm.conf.
 
 .SS "Options"
 .TP "10"
@@ -60,7 +71,13 @@ Displays the job accounting data for all jobs in the job accounting log file.
 This is the default behavior when the 
 .BR "sacct "
 command is executed by the root user.
-
+.TP 
+\f3\-A \fP\f2account_list\fP\f3,\fP  \f3\-\-accounts\fP\f3=\fP\f2account_list\fP
+Displays the statistics only for the jobs started on the accounts specified by
+the \f2account_list\fP operand, which is a comma\-separated list of
+account names.
+Space characters are not allowed in the \f2account_list\fP. Default is
+all accounts\&.
 .TP 
 \f3\-b \fP\f3,\fP \f3\-\-brief\fP
 Displays a brief listing, which includes the following data:
@@ -81,7 +98,7 @@ This option has no effect when the
 option is also specified.
 
 .TP 
-\f3\-C \fP\f2cluster_list\fP\f3,\fP  \f3\-\-cluster\fP\f3=\fP\f2cluster_list\fP
+\f3\-C \fP\f2cluster_list\fP\f3,\fP  \f3\-\-clusters\fP\f3=\fP\f2cluster_list\fP
 Displays the statistics only for the jobs started on the clusters specified by
 the \f2cluster_list\fP operand, which is a comma\-separated list of clusters.
 Space characters are not allowed in the \f2cluster_list\fP. \-1 for
@@ -95,12 +112,31 @@ Displays (dumps) the raw data records.
 This option overrides the 
 \f3\-\-brief\fP 
 and 
-\f3\-\-fields=\fP 
+\f3\-\-format=\fP 
 options.
 .IP 
 The section titled "INTERPRETING THE \-\-dump OPTION OUTPUT" describes the 
 data output when this option is used.
 
+.TP 
+\f3\-\-duplicates\fP
+If SLURM job ids are reset, but the job accounting log file isn't
+reset at the same time (with -e, for example), some job numbers will
+probably appear more than once in the accounting log file to refer to
+different jobs; such jobs can be distinguished by the "submit" time
+stamp in the data records.
+.IP 
+When data for specific jobs are requested with the --jobs option, we
+assume that the user wants to see only the most recent job with that
+number. This behavior can be overridden by specifying --duplicates, in
+which case all records that match the selection criteria will be returned.
+.IP
+When --jobs is not specified, we report data for all jobs that match
+the selection criteria, even if some of the job numbers are
+reused. Specify that you only want the most recent job for each
+selected job number with the --noduplicates option.
+
+
 .TP
 \f3\-e \fP\f2time_spec\fP \f3,\fP \f3\-\-expire=\fP\f2time_spec\fP
 .IP
@@ -121,30 +157,38 @@ log file, with ".expired" appended to the file name. For example, if
 the accounting log file is /var/log/slurmacct.log, the expired log
 file will be /var/log/slurmacct.log.expired.
 
+.TP
+\f3\-\-endtime\fP\f3=\fP\f2endtime\fP
+Select jobs eligible before this time.
+Valid Formats are.
+	HH:MM[:SS] [AM|PM]
+	MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
+	MM/DD[/YY]-HH:MM[:SS]
+
 .TP 
-\f3\-F \fP\f2field_list\fP \f3,\fP  \f3\-\-fields\fP\f3=\fP\f2field_list\fP
+\f3\-F \fP\f2format_list\fP \f3,\fP  \f3\-\-format\fP\f3=\fP\f2format_list\fP
 Displays the job accounting data specified by the 
-\f2field_list\fP 
+\f2format_list\fP 
 operand, which is a comma\-separated list of fields.
 Space characters are not allowed in the 
-\f2field_list\fP\c
+\f2format_list\fP\c
 \&. 
 .IP 
 See the 
-\f3\-\-help\-fields\fP 
+\f3\-\-helpformat\fP 
 option for a list of the available fields.
 See the section titled "Job Accounting Fields" for a description of 
 each field.
 .IP 
 The job accounting data is displayed in the order specified by the 
-\f2field_list\fP 
+\f2format_list\fP 
 operand.
 Thus, the following two commands display the same data but in different order:
 .RS 
 .PP
 .nf 
 .ft 3
-# sacct \-\-fields=jobid,status
+# sacct \-\-format=jobid,status
 Jobid    Status
 \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\-
 3          COMPLETED
@@ -157,7 +201,7 @@ Jobid    Status
 .PP
 .nf 
 .ft 3
-# sacct \-\-fields=status,jobid
+# sacct \-\-format=status,jobid
 Status     Jobid
 \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\-
 COMPLETED  3
@@ -204,9 +248,9 @@ Default is no restrictions.  This is virtually the same as the \-\-gid option\&.
 \f3\-h \fP\f3,\fP \f3\-\-help\fP
 Displays a general help message.
 .TP 
-\f3\-\-help\-fields\fP
+\f3\-\-helpformat\fP
 Displays a list of fields that can be specified with the 
-\f3\-\-fields\fP 
+\f3\-\-format\fP 
 option.
 .RS 
 .PP
@@ -214,16 +258,16 @@ option.
 .ft 3
 Fields available:
 account     associd     cluster     cpu       
-cputime     elapsed     end         exitcode  
-finished    gid         group       job       
-jobid       jobname     ncpus       nodes     
-nnodes      nprocs      ntasks      pages     
-partition   rss         start       state     
-status      submit      timelimit   submitted 
-systemcpu   uid         user        usercpu   
-vsize       blockid     connection  geo       
-max_procs   reboot      rotate      bg_start_point
-wckey     
+cputime     elapsed     eligible    end       
+exitcode    finished    gid         group     
+job         jobid       jobname     ncpus     
+nodes       nnodes      nprocs      ntasks    
+pages       partition   rss         start     
+state       status      submit      timelimit 
+submitted   systemcpu   uid         user      
+usercpu     vsize       blockid     connection
+geo         max_procs   reboot      rotate    
+bg_start_point  wckey     
 
 .ft 1
 .fi 
@@ -284,6 +328,10 @@ Displays a long listing, which includes the following data:
 \f3exitcode\fP 
 .RE 
 
+.TP 
+\f3\-\-noduplicates\fP
+See the discussion under --duplicates.
+
 .TP 
 \f3\-\-noheader\fP
 Prevents the display of the heading over the output.
@@ -299,6 +347,10 @@ Dumps accounting records in an easy\-to\-read format.
 .IP 
 This option is provided for debugging.
 
+.TP
+\f3\-P \fP\f3,\fP \f3\-\-purge\fP
+Used in conjunction with --expire to remove invalid data from the job accounting log.
+
 .TP 
 \f3\-p \fP\f2partition_list\fP \f3,\fP  \f3\-\-partition\fP\f3=\fP\f2partition_list\fP
 Displays information about jobs and job steps specified by the 
@@ -380,6 +432,14 @@ Space characters are not allowed in the
 \f2state_list\fP\c
 \&.
 
+.TP
+\f3\-\-starttime\fP\f3=\fP\f2starttime\fP
+Select jobs eligible after this time.
+Valid Formats are.
+	HH:MM[:SS] [AM|PM]
+	MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
+	MM/DD[/YY]-HH:MM[:SS]
+
 .TP 
 \f3\-t \fP\f3,\fP \f3\-\-total\fP
 Displays only the cumulative statistics for each job.
@@ -410,6 +470,14 @@ Displays a help message.
 Reports the state of certain variables during processing.
 This option is primarily used for debugging.
 
+.TP 
+\f3\-W \fP\f2wckey_list\fP\f3,\fP  \f3\-\-wckeys\fP\f3=\fP\f2wckey_list\fP
+Displays the statistics only for the jobs started on the wckeys specified by
+the \f2wckey_list\fP operand, which is a comma\-separated list of
+wckey names.
+Space characters are not allowed in the \f2wckey_list\fP. Default is
+all wckeys\&.
+
 .SS "Job Accounting Fields"
 The following describes each job accounting field:
 .RS 
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index 4bf2c3f7b..ca40ffc6b 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -228,7 +228,7 @@ You can still set this, but have to wait for future versions of SLURM
 before it is enforced.)
 
 .TP
-\fIGrpJobs\fP=<max cpus>
+\fIGrpJobs\fP=<max jobs>
 Maximum number of running jobs in aggregate for 
 this association and all association which are children of this association.
 To clear a previously set value use the modify command with a new value of \-1.
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 6f89528e9..62778dcfd 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -592,6 +592,16 @@ Explicitly exclude certain nodes from the resources granted to the job.
 The following options support Blue Gene systems, but may be 
 applicable to other systems as well.
 
+.TP
+\fB\-\-blrts\-image\fR[=]<\fIpath\fR>
+Path to blrts image for bluegene block.  BGL only.
+Default from \fIblugene.conf\fR if not set.
+
+.TP
+\fB\-\-cnload\-image\fR=\fIpath\fR
+Path to compute node image for bluegene block.  BGP only.
+Default from \fIblugene.conf\fR if not set.
+
 .TP
 \fB\-\-conn\-type\fR[=]<\fItype\fR>
 Require the partition connection type to be of a certain type.  
@@ -599,6 +609,9 @@ On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
 You should not normally set this option.
 SLURM will normally allocate a TORUS if possible for a given geometry.
+If running on a BGP system and wanting to run in HTC mode (only for 1
+midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
+for virtual node mode, and HTC_L for Linux mode.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR[=]<\fIXxYxZ\fR>
@@ -608,12 +621,32 @@ Z directions. For example "\-\-geometry=2x3x4", specifies a block
 of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on 
 Blue Gene).
 
+.TP
+\fB\-\-ioload\-image\fR=\fIpath\fR
+Path to io image for bluegene block.  BGP only.
+Default from \fIblugene.conf\fR if not set.
+
+.TP
+\fB\-\-linux\-image\fR[=]<\fIpath\fR>
+Path to linux image for bluegene block.  BGL only.
+Default from \fIblugene.conf\fR if not set.
+
+.TP
+\fB\-\-mloader\-image\fR[=]<\fIpath\fR>
+Path to mloader image for bluegene block.
+Default from \fIblugene.conf\fR if not set.
+
 .TP
 \fB\-R\fR, \fB\-\-no\-rotate\fR
 Disables rotation of the job's requested geometry in order to fit an 
 appropriate partition.
 By default the specified geometry can rotate in three dimensions.
 
+.TP
+\fB\-\-ramdisk\-image\fR[=]<\fIpath\fR>
+Path to ramdisk image for bluegene block.  BGL only.
+Default from \fIblugene.conf\fR if not set.
+
 .TP
 \fB\-\-reboot\fR
 Force the allocated nodes to reboot before starting the job.
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 77d4f36a1..c58ff0a22 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -656,6 +656,13 @@ will be sorted my SLURM.
 Specify wckey to be used with job.  If TrackWCKey=no (default) in the
 slurm.conf this value does not get looked at. 
 
+.TP
+\fB\-\-wrap\fR[=]<\fIcommand string\fR>
+Sbatch will wrap the specified command string in a simple "sh" shell script,
+and submit that script to the slurm controller.  When \-\-wrap is used,
+a script name and arguments may not be specified on the command line; instead
+the sbatch-generated wrapper script is used.
+
 .TP
 \fB\-x\fR, \fB\-\-exclude\fR[=]<\fInode name list\fR>
 Explicitly exclude certain nodes from the resources granted to the job.
@@ -666,7 +673,12 @@ applicable to other systems as well.
 
 .TP
 \fB\-\-blrts\-image\fR[=]<\fIpath\fR>
-Path to blrts image for bluegene block.
+Path to blrts image for bluegene block.  BGL only.
+Default from \fIblugene.conf\fR if not set.
+
+.TP
+\fB\-\-cnload\-image\fR=\fIpath\fR
+Path to compute node image for bluegene block.  BGP only.
 Default from \fIblugene.conf\fR if not set.
 
 .TP
@@ -676,6 +688,9 @@ On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
 You should not normally set this option.
 SLURM will normally allocate a TORUS if possible for a given geometry.
+If running on a BGP system and wanting to run in HTC mode (only for 1
+midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
+for virtual node mode, and HTC_L for Linux mode.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR[=]<\fIXxYxZ\fR>
@@ -685,9 +700,14 @@ Z directions. For example "\-\-geometry=2x3x4", specifies a block
 of nodes having 2 x 3 x 4 = 24 nodes (actually base partions on 
 Blue Gene).
 
+.TP
+\fB\-\-ioload\-image\fR=\fIpath\fR
+Path to io image for bluegene block.  BGP only.
+Default from \fIblugene.conf\fR if not set.
+
 .TP
 \fB\-\-linux\-image\fR[=]<\fIpath\fR>
-Path to linux image for bluegene block.
+Path to linux image for bluegene block.  BGL only.
 Default from \fIblugene.conf\fR if not set.
 
 .TP
@@ -703,20 +723,13 @@ By default the specified geometry can rotate in three dimensions.
 
 .TP
 \fB\-\-ramdisk\-image\fR[=]<\fIpath\fR>
-Path to ramdisk image for bluegene block.
+Path to ramdisk image for bluegene block.  BGL only.
 Default from \fIblugene.conf\fR if not set.
 
 .TP
 \fB\-\-reboot\fR
 Force the allocated nodes to reboot before starting the job.
 
-.TP
-\fB\-\-wrap\fR[=]<\fIcommand string\fR>
-Sbatch will wrap the specified command string in a simple "sh" shell script,
-and submit that script to the slurm controller.  When \-\-wrap is used,
-a script name and arguments may not be specified on the command line; instead
-the sbatch-generated wrapper script is used.
-
 .SH "INPUT ENVIRONMENT VARIABLES"
 .PP
 Upon startup, sbatch will read and handle the options set in the following
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index 4a4b1f7fb..0c9b19cd7 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -447,7 +447,12 @@ To permit all groups to use the partition specify "AllowGroups=ALL".
 .TP
 \fIDefault\fP=<yes|no>
 Specify if this partition is to be used by jobs which do not explicitly 
-identify a partition to use. Possible values are"YES" and "NO".
+identify a partition to use. 
+Possible output values are "YES" and "NO".
+In order to change the default partition of a running system, 
+use the scontrol update command and set Default=yes for the partition
+that you want to become the new default.
+
 .TP
 \fIHidden\fP=<yes|no>
 Specify if the partition and its jobs should be hidden from view. 
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 28a453d12..f933846ed 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -264,6 +264,11 @@ Extended processor information: number of requested sockets, cores,
 threads (S:C:T) per node for the job.
 .RE
 
+.TP
+\fB\-U <account_list>\fR, \fB\-\-account=<account_list>\fR
+Specify the accounts of the jobs to view. Accepts a comma separated 
+list of account names. This has no effect when listing job steps.
+
 .TP
 \fB\-p <part_list>\fR, \fB\-\-partition=<part_list>\fR
 Specify the partitions of the jobs or steps to view. Accepts a comma separated 
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index 219ab933c..3232b168f 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -4,10 +4,10 @@
 sreport \- Used to generate reports from the slurm accounting data.
 
 .SH "SYNOPSIS"
-\fBssreport\fR [\fIOPTIONS\fR...] [\fICOMMAND\fR...]
+\fBsreport\fR [\fIOPTIONS\fR...] [\fICOMMAND\fR...]
 
 .SH "DESCRIPTION"
-\fBssreport\fR is used to generate certain reports.  More can be added
+\fBsreport\fR is used to generate certain reports.  More can be added
 at any time.  It provides a view into accounting data gathered from slurm via
 the account information maintained within a database with the interface 
 being provided by the \fBslurmdbd\fR (Slurm Database daemon).
@@ -20,7 +20,7 @@ Use all clusters instead of only cluster from where the command was run.
 
 .TP
 \fB\-h\fR, \fB\-\-help\fR
-Print a help message describing the usage of \fBssreport\fR.
+Print a help message describing the usage of \fBsreport\fR.
 This is equivalent to the \fBhelp\fR command.
 
 .TP
@@ -130,7 +130,7 @@ Repeat the last command executed.
 Various reports are as follows...
      cluster \- AccountUtilizationByUser, UserUtilizationByAccount,
      UserUtilizationByWckey, Utilization, WCKeyUtilizationByUser
-     job     \- Sizes
+     job     \- SizesByAccount, SizesByWckey
      user    \- TopUsage
 
 .TP
@@ -168,12 +168,17 @@ This report will display wckey utilization sorted by WCKey name for
 each user on each cluster.  
 
 .TP
-.B job Sizes
+.B job SizesByAccount
 This report will dispay the amount of time used for job ranges
 specified by the 'grouping=' option.  Only a single level in the tree
 is displayed defaulting to the root dir.  If you specify other
-accounts with the 'account=' option you will receive those accounts
-sub accounts.
+accounts with the 'account=' option sreport will use those accounts as
+the root account and you will receive the sub accounts for the
+accounts listed.
+.TP
+.B job SizesByWckey
+This report will display the amount of time for each wckey for job ranges
+specified by the 'grouping=' option.  
 
 .TP
 .B user TopUsage
@@ -191,26 +196,47 @@ COMMON FOR ALL TYPES
 .B All_Clusters
 Use all monitored clusters default is local cluster.
 .TP
+.B Clusters=<OPT>
+List of clusters to include in report.  Default is local cluster.
+.TP
 .B End=<OPT>
 Period ending for report. Default is 23:59:59 of previous day.
+Valid time formats are...
+HH:MM[:SS] [AM|PM]
+MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
+MM/DD[/YY]-HH:MM[:SS]         
 .TP
 .B Format=<OPT>
 Comma separated list of fields to display in report.
 .TP
 .B Start=<OPT>
 Period start for report.  Default is 00:00:00 of previous day.
+Valid time formats are...
+HH:MM[:SS] [AM|PM]
+MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
+MM/DD[/YY]-HH:MM[:SS]         
 .RE
 
 .TP
 CLUSTER
 .RS
 .TP
-.B Names=<OPT>
-List of clusters to include in report.  Default is local cluster.
+.B Accounts=<OPT>
+When used with the UserUtilizationByAccount, or
+AccountUtilizationByUser, List of accounts to include in report.
+Default is all. 
 .TP
 .B Tree
 When used with the AccountUtilizationByUser report will span the
 accounts as they in the hierarchy.
+.TP
+.B Users=<OPT>
+When used with any report other than Utilization, List of users to
+include in report.  Default is all.
+.TP
+.B Wckeys=<OPT>
+When used with the UserUtilizationByWckey or WCKeyUtilizationByUser,
+List of wckeys to include in report. Default is all. 
 .RE
 
 .TP
@@ -218,10 +244,17 @@ JOB
 .RS
 .TP
 .B Accounts=<OPT>
-List of accounts to use for the report Default is all.
-.TP
-.B Clusters=<OPT>
-List of clusters to include in report.  Default is local cluster.
+List of accounts to use for the report Default is all.  The SizesByAccount
+report only displays 1 hierarchical level. If accounts are specified
+the next layer of accounts under those specified will be displayed,
+not the accounts specified.  In the SizesByAccount reports the default
+for accounts is root.  This explanation does not apply when run with
+the FlatView option.
+.TP
+.B FlatView
+When used with the SizesByAccount report, accounts are not grouped in a
+hierarchical level; instead each account where jobs ran is printed on a
+separate line without any hierarchy.
 .TP
 .B GID=<OPT>
 List of group ids to include in report.  Default is all.
@@ -239,38 +272,83 @@ List of partitions jobs ran on to include in report.  Default is all.
 When used with the Sizes report will print number of jobs ran instead
 of time used.  
 .TP
-.B TopCount=<OPT>
-Used in the TopUsage report.  Change the number of users displayed.
-Default is 10.
-.TP
 .B Users=<OPT>
 List of users jobs to include in report.  Default is all.
+.TP
+.B Wckeys=<OPT>
+List of wckeys to use for the report.  Default is all.  The
+SizesByWckey report shows all users summed together.  If you want only
+certain users specify them with the Users= option.
 .RE
 
 .TP
 USER
 .RS
 .TP
-.B Clusters=<OPT>
-List of clusters to include in report. Default is local cluster.
+.B Accounts=<OPT>
+List of accounts to use for the report. Default is all.
 .TP
 .B Group
-Group all accounts together for each user.  Default is a separate entry for each user and account reference.
+Group all accounts together for each user.  Default is a separate
+entry for each user and account reference.
+.TP
+.B TopCount=<OPT>
+Used in the TopUsage report.  Change the number of users displayed.
+Default is 10.
 .TP
 .B Users=<OPT>
 List of users jobs to include in report.  Default is all.
 .RE
 
-
+.TP
+ 
+.SH "Format Options for Each Report"
+
+\fBCluster\fP
+       AccountUtilizationByUser
+       UserUtilizationByAccount
+             \- Accounts, Cluster, CPUCount, Login, Proper, Used
+       UserUtilizationByWckey
+       WCKeyUtilizationByUser
+             \- Cluster, CPUCount, Login, Proper, Used, Wckey
+       Utilization
+             \- Allocated, Cluster, CPUCount, Down, Idle, Overcommited,
+               PlannedDown, Reported, Reserved
+
+\fBJob\fP
+       SizesByAccount
+             \- Account, Cluster
+       SizesByWckey
+             \- Wckey, Cluster
+
+\fBUser\fP
+       TopUsage
+             \- Account, Cluster, Login, Proper, Used
+                                                                           
+.TP         
 .SH "EXAMPLES"
-.eo
-.br
-> sreport job sizes 
-.br
-> sreport cluster utilization 
-.br
-> sreport user top 
-.ec
+.TP
+\fBsreport job sizesbyaccount\fP
+.TP
+\fBsreport cluster utilization\fP
+.TP
+\fBsreport user top\fP
+.TP
+\fBsreport job sizesbyaccount All_Clusters users=gore1 account=environ PrintJobCount\fP
+Report number of jobs by user gore1 within the environ account
+.TP
+\fBsreport cluster AccountUtilizationByUser cluster=zeus user=gore1 start=2/23/08 end=2/24/09 format=Accounts,Cluster,CPU_Count,Login,Proper,Used\fP
+Report cluster account utilization with the specified fields during
+the specified 24 hour day of February 23, 2009, by user gore1
+.TP
+\fBsreport cluster AccountUtilizationByUser cluster=zeus accounts=lc start=2/23/08 end=2/24/09\fP
+Report cluster account utilization by user in the LC account on
+cluster zeus
+.TP
+\fBsreport user topusage start=2/16/09 end=2/23/09 \-t percent account=lc\fP
+Report top usage in percent of the lc account during the specified week
+.TP
+               
 
 .SH "COPYING"
 Copyright (C) 2008 Lawrence Livermore National Security.
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 14e571ae8..c4d024785 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -967,7 +967,12 @@ applicable to other systems as well.
 
 .TP
 \fB\-\-blrts\-image\fR=\fIpath\fR
-Path to blrts image for bluegene block. 
+Path to blrts image for bluegene block.  BGL only.
+Default from \fIblugene.conf\fR if not set.
+
+.TP
+\fB\-\-cnload\-image\fR=\fIpath\fR
+Path to compute node image for bluegene block.  BGP only.
 Default from \fIblugene.conf\fR if not set.
 
 .TP
@@ -977,6 +982,9 @@ On Blue Gene the acceptable of \fItype\fR are MESH, TORUS and NAV.
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
 You should not normally set this option.
 SLURM will normally allocate a TORUS if possible for a given geometry.
+If running on a BGP system and wanting to run in HTC mode (only for 1
+midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
+for virtual node mode, and HTC_L for Linux mode.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR=\fIXxYxZ\fR
@@ -986,9 +994,14 @@ Z directions. For example "\-\-geometry=2x3x4", specifies a block
 of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on 
 Blue Gene).
 
+.TP
+\fB\-\-ioload\-image\fR=\fIpath\fR
+Path to io image for bluegene block.  BGP only.
+Default from \fIblugene.conf\fR if not set.
+
 .TP
 \fB\-\-linux\-image\fR=\fIpath\fR
-Path to linux image for bluegene block.
+Path to linux image for bluegene block.  BGL only.
 Default from \fIblugene.conf\fR if not set.
 
 .TP
@@ -1004,7 +1017,7 @@ By default the specified geometry can rotate in three dimensions.
 
 .TP
 \fB\-\-ramdisk\-image\fR=\fIpath\fR
-Path to ramdisk image for bluegene block.
+Path to ramdisk image for bluegene block.  BGL only.
 Default from \fIblugene.conf\fR if not set.
 
 .TP
diff --git a/doc/man/man5/bluegene.conf.5 b/doc/man/man5/bluegene.conf.5
index 344b5b5fc..eb2426123 100644
--- a/doc/man/man5/bluegene.conf.5
+++ b/doc/man/man5/bluegene.conf.5
@@ -17,8 +17,10 @@ Changes to the configuration file take effect upon restart of
 SLURM daemons, daemon receipt of the SIGHUP signal, or execution 
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
-The overall configuration parameters available include:
 
+There are some differences between Bluegene/L and Bluegene/P in respects to the contents of the bluegene.conf file.  
+
+.SH "The Bluegene/L specific options are:"
 .TP
 \fBAltBlrtsImage\fR
 Alternative BlrtsImage.  This is an optional field only used for 
@@ -36,16 +38,59 @@ Groups= is not stated then this image will be able to be used by all
 groups. You can but as many alternative images as you want in the conf file.
 
 .TP
-\fBAltMloaderImage\fR
-Alternative MloaderImage.  This is an optional field only used for 
+\fBAltRamDiskImage\fR
+Alternative RamDiskImage.  This is an optional field only used for 
 mulitple images on a system and should be followed by a Groups= with
 the user groups allowed to use this image (i.e. Groups=da,jette) if 
 Groups= is not stated then this image will be able to be used by all
 groups. You can but as many alternative images as you want in the conf file.
 
 .TP
-\fBAltRamDiskImage\fR
-Alternative RamDiskImage.  This is an optional field only used for 
+\fBBlrtsImage\fR
+BlrtsImage used for creation of all bgblocks.
+There is no default value and this must be specified.
+
+.TP
+\fBLinuxImage\fR
+LinuxImage used for creation of all bgblocks.
+There is no default value and this must be specified.
+
+.TP
+\fBRamDiskImage\fR
+RamDiskImage used for creation of all bgblocks.
+There is no default value and this must be specified.
+
+.SH "The Bluegene/P specific options are:"
+.TP
+\fBAltCnloadImage\fR
+Alternative CnloadImage.  This is an optional field only used for 
+multiple images on a system and should be followed by a Groups= with
+the user groups allowed to use this image (i.e. Groups=da,jette) if 
+Groups= is not stated then this image will be able to be used by all
+groups. You can put as many alternative images as you want in the conf file.
+
+.TP
+\fBAltIoloadImage\fR
+Alternative IoloadImage.  This is an optional field only used for 
+multiple images on a system and should be followed by a Groups= with
+the user groups allowed to use this image (i.e. Groups=da,jette) if 
+Groups= is not stated then this image will be able to be used by all
+groups. You can put as many alternative images as you want in the conf file.
+
+.TP
+\fBCnloadImage\fR
+CnloadImage used for creation of all bgblocks.
+There is no default value and this must be specified.
+
+.TP
+\fBIoloadImage\fR
+IoloadImage used for creation of all bgblocks.
+There is no default value and this must be specified.
+
+.SH "All options below are common on all Bluegene systems:"
+.TP
+\fBAltMloaderImage\fR
+Alternative MloaderImage.  This is an optional field only used for 
 mulitple images on a system and should be followed by a Groups= with
 the user groups allowed to use this image (i.e. Groups=da,jette) if 
 Groups= is not stated then this image will be able to be used by all
@@ -57,11 +102,6 @@ The number of c\-nodes per base partition.
 There is no default value and this must be specified. (For bgl systems this
 is usually 512)
 
-.TP
-\fBBlrtsImage\fR
-BlrtsImage used for creation of all bgblocks.
-There is no default value and this must be specified.
-
 .TP
 \fBBridgeAPILogFile\fR
 Fully qualified pathname of a into which the Bridge API logs are 
@@ -85,6 +125,11 @@ The default value is 0.
 \fB4\fR: Log all messages
 .RE
 
+.TP
+\fBDenyPassthrough\fR
+Specify which dimsions you do not want to allow pass throughs.  Valid options are X, Y, Z or all.
+example: If you don't want to allow passthroughs in the X and Y diminsions you would specify DenyPassthrough=X,Y
+
 .TP
 \fBLayoutMode\fR
 Describes how SLURM should create bgblocks.
@@ -104,11 +149,6 @@ and starvation of larger jobs.
 \fBUse this mode with caution.\fR
 .RE
 
-.TP
-\fBLinuxImage\fR
-LinuxImage used for creation of all bgblocks.
-There is no default value and this must be specified.
-
 .TP
 \fBMloaderImage\fR
 MloaderImage used for creation of all bgblocks.
@@ -132,11 +172,6 @@ you do not wish to create small blocks, 8 is usually the number to use.
 For bgp IO rich systems 32 is the value that should be used to create small 
 blocks since you can only have 2 ionodes per nodecard instead of 4 like on bgl.
 
-.TP
-\fBRamDiskImage\fR
-RamDiskImage used for creation of all bgblocks.
-There is no default value and this must be specified.
-
 .LP
 Each bgblock is defined by the base partitions used to construct it.
 Ordering is very important for laying out switch wires.  Please create
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index 39877814b..eb8618009 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -217,7 +217,7 @@ job step credentials.
 The slurmctld daemon must be restarted for a change in \fBCryptoType\fR
 to take effect.
 Acceptable values at present include "crypto/munge" and "crypto/openssl".
-The default value is "crypto/munge".
+The default value is "crypto/openssl".
 
 .TP
 \fBDefMemPerCPU\fR
@@ -379,7 +379,11 @@ The default value is zero, which disables execution.
 \fBHealthCheckProgram\fR
 Fully qualified pathname of a script to execute as user root periodically 
 on all compute nodes that are not in the DOWN state. This may be used to 
-verify the node is fully operational and DRAIN the it otherwise.
+verify the node is fully operational and DRAIN the node or send email
+if a problem is detected. 
+Any action to be taken must be explicitly performed by the program
+(e.g. execute "scontrol update NodeName=foo State=drain Reason=tmp_file_system_full"
+to drain a node).
 The interval is controlled using the \fBHealthCheckInterval\fR parameter.
 Note that the \fBHealthCheckProgram\fR will be executed at the same time 
 on all nodes to minimize its impact upon parallel programs.
@@ -1222,6 +1226,9 @@ available to identify the process ID of the task being started.
 Standard output from this program of the form 
 "export NAME=value" will be used to set environment variables 
 for the task being spawned. 
+Standard output from this program of the form
+"print ..." will cause that line (without the leading "print ")
+to be printed to the job's standard output.
 The order of task prolog/epilog execution is as follows:
 .RS
 .TP
@@ -1641,7 +1648,7 @@ The default value is "NO".
 
 .TP
 \fBMaxNodes\fR
-Maximum count of nodes (or base partitions for BlueGene systems) which 
+Maximum count of nodes (c\-nodes for BlueGene systems) which 
 may be allocated to any single job.
 The default value is "UNLIMITED", which is represented internally as \-1.
 This limit does not apply to jobs executed by SlurmUser or user root.
diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5
index 33a8e8e3f..c94a518de 100644
--- a/doc/man/man5/slurmdbd.conf.5
+++ b/doc/man/man5/slurmdbd.conf.5
@@ -1,4 +1,4 @@
-.TH "slurmdbd.conf" "5" "June 2008" "slurmdbd.conf 1.3" "Slurm configuration file"
+.TH "slurmdbd.conf" "5" "Feb 2009" "slurmdbd.conf 1.3" "Slurm configuration file"
 .SH "NAME"
 slurmdbd.conf \- Slurm Database Daemon (SlurmDBD) configuration file 
 
@@ -22,18 +22,38 @@ contains a database password.
 The overall configuration parameters available include:
 
 .TP
-\fBArchiveAge\fR
-Move data over this age out of the database to an archive.
-The time is a numeric value and is a number of days.
-If zero, then never archive the data.
-The default value is zero.
+\fBArchiveDir\fR
+If ArchiveScript is not set the slurmdbd will generate a text file that can be 
+read in anytime with sacctmgr load filename.  This directory is where the 
+file will be placed archive has ran.  Default is /tmp.
+
+.TP
+\fBArchiveJobs\fR
+Boolean, yes to archive job data, no other wise.  Default is no.
 
 .TP
 \fBArchiveScript\fR
 This script is executed periodically in order to transfer accounting
 records out of the database into an archive. The script is executed 
-with a single argument, the value of \fBArchiveTime\fR as described
-below.
+with a no arguments, The following environment variables are set.
+.RS
+.TP
+\fBSLURM_ARCHIVE_STEPS\fR 
+1 for archive steps 0 otherwise.
+.TP
+\fBSLURM_ARCHIVE_LAST_STEP\fR
+Time of last step start to archive.
+.TP
+\fBSLURM_ARCHIVE_JOBS\fR
+1 for achive jobs 0 otherwise.
+.TP
+\fBSLURM_ARCHIVE_LAST_JOB\fR
+Time of last job submit to archive.
+.RE
+
+.TP
+\fBArchiveSteps\fR
+Boolean, yes to archive step data, no other wise.  Default is no.
 
 .TP
 \fBAuthInfo\fR
@@ -98,13 +118,17 @@ Values from 0 to 9 are legal, with `0' being "quiet" operation and
 `9' being insanely verbose.
 The default value is 3.
 
+.TP
+\fBDefaultQOS\fR
+When adding a new cluster this will be used as the qos for the cluster 
+unless something is explicitly set by the admin with the create.
+ 
 .TP
 \fBJobPurge\fR
 Individual job records over this age are purged from the database.
 Aggregated information will be preserved indefinitely.
-The time is a numeric value and is a number of days.
-If zero, then job records are never purged.
-The default value is 360 days.
+The time is a numeric value and is a number of months.
+If zero (default), then job records are never purged.
 
 .TP
 \fBLogFile\fR
@@ -173,9 +197,8 @@ The default value is "root".
 \fBStepPurge\fR
 Individual job step records over this age are purged from the database.
 Aggregated information will be preserved indefinitely.
-The time is a numeric value and is a number of days.
-If zero, then job step records are never purged.
-The default value is 30 days.
+The time is a numeric value and is a number of months.
+If zero (default), then job step records are never purged.
 
 .TP
 \fBStorageHost\fR
@@ -237,9 +260,11 @@ Characterization Key. Must be set to track wckey usage.
 .br
 #
 .br
-ArchiveAge=365   # keep 1 year of data online
+ArchiveJobs=yes
+.br
+ArchiveSteps=no
 .br
-ArchiveScript=/usr/sbin/slurm.dbd.archive
+#ArchiveScript=/usr/sbin/slurm.dbd.archive
 .br
 AuthInfo=/var/run/munge/munge.socket.2
 .br
@@ -249,9 +274,9 @@ DbdHost=db_host
 .br
 DebugLevel=4
 .br
-JobPurge=90
+JobPurge=12
 .br
-StepPurge=30
+StepPurge=1
 .br
 LogFile=/var/log/slurmdbd.log
 .br
diff --git a/doc/man/man8/spank.8 b/doc/man/man8/spank.8
index 6bca8161e..047569102 100644
--- a/doc/man/man8/spank.8
+++ b/doc/man/man8/spank.8
@@ -1,4 +1,4 @@
-.TH "SPANK" "8" "Jul 2008" "SPANK" "SLURM plug\-in architecture for Node and job (K)control"
+.TH "SPANK" "8" "February 2009" "SPANK" "SLURM plug\-in architecture for Node and job (K)control"
 .SH "NAME"
 \fBSPANK\fR \- SLURM Plug\-in Architecture for Node and job (K)control 
 
@@ -20,8 +20,8 @@ behavior of SLURM job launch.
 
 .SH "SPANK PLUGINS"
 \fBSPANK\fR plugins are loaded in two separate contexts during a 
-\fBSLURM\fR job.  In "local" context, the plugin is loaded by \fBsrun\fR,
-\fBsbatch\fR or other \fBSLURM\fR user interface. 
+\fBSLURM\fR job.  In "local" context, the plugin is loaded by \fBsrun\fR
+(NOTE: the \fBsalloc\fR and \fBsbatch\fR commands do not support \fBSPANK\fR).
 In local context, options provided by 
 plugins are read by \fBSPANK\fR, and these options are presented to the user. 
 In "remote" context, the plugin is loaded on a compute node of the job,
@@ -47,12 +47,11 @@ the \fBinit\fR callback, then process user options, and finaly take some
 action in \fBslurm_spank_init_post_opt\fR if necessary.
 .TP
 \fBslurm_spank_local_user_init\fR
-Called in local (\fBsrun\fR or \fBsbatch\fR) context only after all 
+Called in local (\fBsrun\fR) context only after all 
 options have been processed. 
 This is called after the job ID and step IDs are available.
 This happens in \fBsrun\fR after the allocation is made, but before 
 tasks are launched.
-This happens in \fBsbatch\fR after the job is submitted.
 .TP
 \fBslurm_spank_user_init\fR 
 Called after privileges are temporarily dropped. (remote context only)
@@ -72,7 +71,7 @@ Called for each task as its exit status is collected by SLURM.
 .TP
 \fBslurm_spank_exit\fR
 Called once just before \fBslurmstepd\fR exits in remote context.
-In local context, called before \fBsrun\fR or \fBsbatch\fR exits.
+In local context, called before \fBsrun\fR exits.
 .LP
 All of these functions have the same prototype, for example:
 .nf
@@ -119,9 +118,7 @@ file. Some examples are:
 User id for running job. (uid_t *) is third arg of \fBspank_get_item\fR
 .TP
 \fBS_JOB_STEPID\fR
-Job step id for running job. (uint32_t *) is third arg of \fBspank_get_item\fR
-For batch jobs (initiated by \fBsbatch\fR), the step id will be 
-\fBSLURM_BATCH_SCRIPT\fR as defined in the \fBslurm.h\fR file.
+Job step id for running job. (uint32_t *) is third arg of \fBspank_get_item\fR.
 .TP
 \fBS_TASK_EXIT_STATUS\fR
 Exit status for exited task. Only valid from \fBslurm_spank_task_exit\fR.
@@ -163,8 +160,7 @@ for \fBspank_getenv\fR usage.
 .LP
 SPANK plugins also have an interface through which they may define
 and implement extra job options. These options are made available to
-the user through SLURM commands such as \fBsrun\fR(1) or 
-\fBsbatch\fR(1), and if the
+the user through SLURM commands such as \fBsrun\fR(1), and if the
 option is specified, its value is forwarded and registered with
 the plugin on the remote side. In this way, \fBSPANK\fR plugins
 may dynamically provide new options and functionality to SLURM.
@@ -217,7 +213,7 @@ registered with SLURM. \fBspank_opt_cb_f\fR is typedef'd in
 Where \fIval\fR is the value of the \fIval\fR field in the \fBspank_option\fR
 struct, \fIoptarg\fR is the supplied argument if applicable, and \fIremote\fR
 is 0 if the function is being called from the "local" host 
-(e.g. \fBsrun\fR or \fBsbatch\fR) or 1 from the "remote" host (\fBslurmd\fR).
+(e.g. \fBsrun\fR) or 1 from the "remote" host (\fBslurmd\fR).
 .LP
 There are two methods by which the plugin can register these options
 with SLURM. The simplest method is for the plugin to define an array
@@ -505,4 +501,4 @@ details.
 \fB/usr/include/slurm/spank.h\fR \- SPANK header file.
 .SH "SEE ALSO"
 .LP
-\fBsbatch\fR(1), \fBsrun\fR(1), \fBslurm.conf\fR(5)
+\fBsrun\fR(1), \fBslurm.conf\fR(5)
diff --git a/etc/bluegene.conf.example b/etc/bluegene.conf.example
index ef9fb3859..8ce73bd51 100644
--- a/etc/bluegene.conf.example
+++ b/etc/bluegene.conf.example
@@ -73,6 +73,8 @@ Numpsets=64 #used for IO rich systems
 BridgeAPILogFile=/var/log/slurm/bridgeapi.log
 BridgeAPIVerbose=0
 
+#DenyPassthrough=X,Y,Z
+
 ###############################################################################
 # Define the static/overlap partitions (bgblocks)
 #
diff --git a/etc/slurmdbd.conf.example b/etc/slurmdbd.conf.example
new file mode 100644
index 000000000..3d609b084
--- /dev/null
+++ b/etc/slurmdbd.conf.example
@@ -0,0 +1,39 @@
+#
+# Example slurmdbd.conf file.
+#
+# See the slurmdbd.conf man page for more information.
+#
+# Archive info
+#ArchiveJobs=yes
+#ArchiveDir="/tmp"
+#ArchiveSteps=yes
+#ArchiveScript=
+#JobPurge=12
+#StepPurge=1
+#
+# Authentication info
+AuthType=auth/munge
+#AuthInfo=/var/run/munge/munge.socket.2
+#
+# slurmDBD info
+DbdAddr=localhost
+DbdHost=localhost
+#DbdPort=7031
+SlurmUser=slurm
+#MessageTimeout=300
+DebugLevel=4
+#DefaultQOS=normal,standby
+LogFile=/var/log/slurm/slurmdbd.log
+PidFile=/var/run/slurmdbd.pid
+#PluginDir=/usr/lib/slurm
+#PrivateData=accounts,users,usage,jobs
+#TrackWCKey=yes
+#
+# Database info
+StorageType=accounting_storage/mysql
+#StorageHost=localhost
+#StoragePort=1234
+StoragePassword=password
+StorageUser=slurm
+#StorageLoc=slurm_acct_db
+
diff --git a/slurm.spec b/slurm.spec
index 483fb04d8..7a4d191f7 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -1,4 +1,4 @@
-# $Id: slurm.spec 16103 2008-12-29 23:24:59Z jette $
+# $Id: slurm.spec 16983 2009-03-24 16:33:55Z da $
 #
 # Note that this package is not relocatable
 
@@ -73,14 +73,14 @@
 %endif
 
 Name:    slurm
-Version: 1.3.13
+Version: 1.3.15
 Release: 1%{?dist}
 
 Summary: Simple Linux Utility for Resource Management
 
 License: GPL 
 Group: System Environment/Base
-Source: slurm-1.3.13.tar.bz2
+Source: slurm-1.3.15.tar.bz2
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
 URL: https://computing.llnl.gov/linux/slurm/
 
@@ -230,10 +230,17 @@ SLURM plugins (loadable shared objects)
 %package torque
 Summary: Torque/PBS wrappers for transitition from Torque/PBS to SLURM.
 Group: Development/System
-Requires: slurm
+Requires: slurm-perlapi
 %description torque
 Torque wrapper scripts used for helping migrate from Torque/PBS to SLURM.
 
+%package slurmdb-direct
+Summary: Wrappers to write directly to the slurmdb.
+Group: Development/System
+Requires: slurm-perlapi
+%description slurmdb-direct
+Wrappers to write directly to the slurmdb.
+
 %if %{slurm_with aix}
 %package aix-federation
 Summary: SLURM interfaces to IBM AIX and Federation switch.
@@ -258,7 +265,7 @@ SLURM process tracking plugin for SGI job containers.
 #############################################################################
 
 %prep
-%setup -n slurm-1.3.13
+%setup -n slurm-1.3.15
 
 %build
 %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \
@@ -287,6 +294,7 @@ if [ -d /etc/init.d ]; then
    install -D -m755 etc/init.d.slurmdbd $RPM_BUILD_ROOT/etc/init.d/slurmdbd
 fi
 install -D -m644 etc/slurm.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.conf.example
+install -D -m644 etc/slurmdbd.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurmdbd.conf.example
 install -D -m755 etc/slurm.epilog.clean ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.epilog.clean
 
 # Delete unpackaged files:
@@ -426,6 +434,7 @@ rm -rf $RPM_BUILD_ROOT
 %{_sbindir}/slurmdbd
 %{_mandir}/man5/slurmdbd.*
 %{_mandir}/man8/slurmdbd.*
+%config %{_sysconfdir}/slurmdbd.conf.example
 #############################################################################
 
 %files -f plugins.files plugins
@@ -480,6 +489,12 @@ rm -rf $RPM_BUILD_ROOT
 %{_bindir}/mpiexec
 #############################################################################
 
+%files slurmdb-direct
+%defattr(-,root,root)
+%config (noreplace) %{_perldir}/config.slurmdb.pl
+%{_sbindir}/moab_2_slurmdb
+#############################################################################
+
 %if %{slurm_with aix}
 %files aix-federation
 %defattr(-,root,root)
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index d4a4d4e9f..b63eb1e23 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -252,7 +252,11 @@ enum connection_type {
 	SELECT_MESH, 		/* nodes wired in mesh */
 	SELECT_TORUS, 		/* nodes wired in torus */
 	SELECT_NAV,		/* nodes wired in torus else mesh */
-	SELECT_SMALL 		/* nodes in a small partition */
+	SELECT_SMALL, 		/* nodes in a small partition */
+	SELECT_HTC_S,           /* nodes in a htc running SMP mode */
+	SELECT_HTC_D,           /* nodes in a htc running Dual mode */
+	SELECT_HTC_V,           /* nodes in a htc running VN mode */
+	SELECT_HTC_L            /* nodes in a htc running in Linux mode */
 };
 
 enum node_use_type {
diff --git a/slurm/slurm_errno.h b/slurm/slurm_errno.h
index eafe21aa7..0cb0d1b82 100644
--- a/slurm/slurm_errno.h
+++ b/slurm/slurm_errno.h
@@ -159,6 +159,7 @@ enum {
 	ESLURM_NEED_RESTART,
 	ESLURM_ACCOUNTING_POLICY,
 	ESLURM_INVALID_TIME_LIMIT,
+	ESLURM_INVALID_WCKEY,
 
 	/* switch specific error codes, specific values defined in plugin module */
 	ESLURM_SWITCH_MIN = 3000,
diff --git a/src/api/job_info.c b/src/api/job_info.c
index 516e4eb1c..e52641612 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -538,8 +538,13 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 			xstrcat(out, " ");
 		else
 			xstrcat(out, "\n   ");
+#ifdef HAVE_BGL
 		snprintf(tmp_line, sizeof(tmp_line),
 			 "LinuxImage=%s", select_buf);
+#else
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "CnloadImage=%s", select_buf);
+#endif
 		xstrcat(out, tmp_line);
 	}
 	/****** Line 22 (optional) ******/
@@ -564,8 +569,13 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 			xstrcat(out, " ");
 		else
 			xstrcat(out, "\n   ");
+#ifdef HAVE_BGL
 		snprintf(tmp_line, sizeof(tmp_line),
 			 "RamDiskImage=%s", select_buf);
+#else
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "IoloadImage=%s", select_buf);
+#endif
 		xstrcat(out, tmp_line);
 	}
 	xstrcat(out, "\n\n");
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index a8395b6fc..3d281c479 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -339,7 +339,7 @@ static int _get_local_association_list(void *db_conn, int enforce)
 	if(local_cluster_name) {
 		assoc_q.cluster_list = list_create(NULL);
 		list_append(assoc_q.cluster_list, local_cluster_name);
-	} else if(enforce && !slurmdbd_conf) {
+	} else if((enforce & ACCOUNTING_ENFORCE_ASSOCS) && !slurmdbd_conf) {
 		error("_get_local_association_list: "
 		      "no cluster name here going to get "
 		      "all associations.");
@@ -356,9 +356,10 @@ static int _get_local_association_list(void *db_conn, int enforce)
 	if(!local_association_list) {
 		/* create list so we don't keep calling this if there
 		   isn't anything there */
-		local_association_list = list_create(NULL);
+		local_association_list = 
+			list_create(destroy_acct_association_rec);
 		slurm_mutex_unlock(&assoc_mgr_association_lock);
-		if(enforce) {
+		if(enforce & ACCOUNTING_ENFORCE_ASSOCS) {
 			error("_get_local_association_list: "
 			      "no list was made.");
 			return SLURM_ERROR;
@@ -387,7 +388,7 @@ static int _get_local_qos_list(void *db_conn, int enforce)
 
 	if(!local_qos_list) {
 		slurm_mutex_unlock(&local_qos_lock);
-		if(enforce) {
+		if(enforce & ACCOUNTING_ENFORCE_ASSOCS) {
 			error("_get_local_qos_list: no list was made.");
 			return SLURM_ERROR;
 		} else {
@@ -414,7 +415,7 @@ static int _get_local_user_list(void *db_conn, int enforce)
 
 	if(!local_user_list) {
 		slurm_mutex_unlock(&local_user_lock);
-		if(enforce) {
+		if(enforce & ACCOUNTING_ENFORCE_ASSOCS) {
 			error("_get_local_user_list: "
 			      "no list was made.");
 			return SLURM_ERROR;
@@ -443,7 +444,7 @@ static int _get_local_wckey_list(void *db_conn, int enforce)
 	if(local_cluster_name) {
 		wckey_q.cluster_list = list_create(NULL);
 		list_append(wckey_q.cluster_list, local_cluster_name);
-	} else if(enforce && !slurmdbd_conf) {
+	} else if((enforce & ACCOUNTING_ENFORCE_WCKEYS) && !slurmdbd_conf) {
 		error("_get_local_wckey_list: "
 		      "no cluster name here going to get "
 		      "all wckeys.");
@@ -460,9 +461,9 @@ static int _get_local_wckey_list(void *db_conn, int enforce)
 	if(!assoc_mgr_wckey_list) {
 		/* create list so we don't keep calling this if there
 		   isn't anything there */
-		assoc_mgr_wckey_list = list_create(NULL);
+		assoc_mgr_wckey_list = list_create(destroy_acct_wckey_rec);
 		slurm_mutex_unlock(&assoc_mgr_wckey_lock);
-		if(enforce) {
+		if(enforce & ACCOUNTING_ENFORCE_WCKEYS) {
 			error("_get_local_wckey_list: "
 			      "no list was made.");
 			return SLURM_ERROR;
@@ -494,7 +495,7 @@ static int _refresh_local_association_list(void *db_conn, int enforce)
 	if(local_cluster_name) {
 		assoc_q.cluster_list = list_create(NULL);
 		list_append(assoc_q.cluster_list, local_cluster_name);
-	} else if(enforce && !slurmdbd_conf) {
+	} else if((enforce & ACCOUNTING_ENFORCE_ASSOCS) && !slurmdbd_conf) {
 		error("_refresh_local_association_list: "
 		      "no cluster name here going to get "
 		      "all associations.");
@@ -634,7 +635,7 @@ static int _refresh_local_wckey_list(void *db_conn, int enforce)
 	if(local_cluster_name) {
 		wckey_q.cluster_list = list_create(NULL);
 		list_append(wckey_q.cluster_list, local_cluster_name);
-	} else if(enforce && !slurmdbd_conf) {
+	} else if((enforce & ACCOUNTING_ENFORCE_WCKEYS) && !slurmdbd_conf) {
 		error("_refresh_local_wckey_list: "
 		      "no cluster name here going to get "
 		      "all wckeys.");
@@ -745,7 +746,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 			return SLURM_ERROR;
 	}
 	if((!local_association_list || !list_count(local_association_list))
-	   && !enforce) 
+	   && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) 
 		return SLURM_SUCCESS;
 
 	if(!assoc->id) {
@@ -753,7 +754,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 			acct_user_rec_t user;
 
 			if(assoc->uid == (uint32_t)NO_VAL) {
-				if(enforce) {
+				if(enforce & ACCOUNTING_ENFORCE_ASSOCS) {
 					error("get_assoc_id: "
 					      "Not enough info to "
 					      "get an association");
@@ -766,7 +767,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 			user.uid = assoc->uid;
 			if(assoc_mgr_fill_in_user(db_conn, &user, enforce) 
 			   == SLURM_ERROR) {
-				if(enforce) 
+				if(enforce & ACCOUNTING_ENFORCE_ASSOCS) 
 					return SLURM_ERROR;
 				else {
 					return SLURM_SUCCESS;
@@ -835,7 +836,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 	
 	if(!ret_assoc) {
 		slurm_mutex_unlock(&assoc_mgr_association_lock);
-		if(enforce) 
+		if(enforce & ACCOUNTING_ENFORCE_ASSOCS) 
 			return SLURM_ERROR;
 		else
 			return SLURM_SUCCESS;
@@ -895,7 +896,8 @@ extern int assoc_mgr_fill_in_user(void *db_conn, acct_user_rec_t *user,
 		if(_get_local_user_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if((!local_user_list || !list_count(local_user_list)) && !enforce) 
+	if((!local_user_list || !list_count(local_user_list)) 
+	   && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) 
 		return SLURM_SUCCESS;
 
 	slurm_mutex_lock(&local_user_lock);
@@ -940,7 +942,7 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 			return SLURM_ERROR;
 	}
 	if((!assoc_mgr_wckey_list || !list_count(assoc_mgr_wckey_list))
-	   && !enforce) 
+	   && !(enforce & ACCOUNTING_ENFORCE_WCKEYS)) 
 		return SLURM_SUCCESS;
 
 	if(!wckey->id) {
@@ -948,7 +950,7 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 			acct_user_rec_t user;
 
 			if(wckey->uid == (uint32_t)NO_VAL && !wckey->user) {
-				if(enforce) {
+				if(enforce & ACCOUNTING_ENFORCE_WCKEYS) {
 					error("get_wckey_id: "
 					      "Not enough info to "
 					      "get an wckey");
@@ -962,7 +964,7 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 			user.name = wckey->user;
 			if(assoc_mgr_fill_in_user(db_conn, &user, enforce) 
 			   == SLURM_ERROR) {
-				if(enforce) 
+				if(enforce & ACCOUNTING_ENFORCE_WCKEYS) 
 					return SLURM_ERROR;
 				else {
 					return SLURM_SUCCESS;
@@ -972,7 +974,7 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 				wckey->user = user.name;
 			wckey->name = user.default_wckey;
 		} else if(wckey->uid == (uint32_t)NO_VAL && !wckey->user) {
-			if(enforce) {
+			if(enforce & ACCOUNTING_ENFORCE_WCKEYS) {
 				error("get_wckey_id: "
 				      "Not enough info 2 to "
 				      "get an wckey");
@@ -1017,13 +1019,22 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 				       wckey->name, found_wckey->name);
 				continue;
 			}
-			
+
 			/* only check for on the slurmdbd */
-			if(!local_cluster_name && found_wckey->cluster
-			   && strcasecmp(wckey->cluster,
-					 found_wckey->cluster)) {
-				debug4("not the right cluster");
-				continue;
+			if(!local_cluster_name) {
+				if(!wckey->cluster) {
+					error("No cluster name was given "
+					      "to check against, "
+					      "we need one to get a wckey.");
+					continue;
+				}
+				
+				if(found_wckey->cluster
+				   && strcasecmp(wckey->cluster, 
+						 found_wckey->cluster)) {
+					debug4("not the right cluster");
+					continue;
+				}
 			}
 		}
 		ret_wckey = found_wckey;
@@ -1033,7 +1044,7 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 	
 	if(!ret_wckey) {
 		slurm_mutex_unlock(&assoc_mgr_wckey_lock);
-		if(enforce) 
+		if(enforce & ACCOUNTING_ENFORCE_WCKEYS) 
 			return SLURM_ERROR;
 		else
 			return SLURM_SUCCESS;
@@ -1137,6 +1148,7 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	int parents_changed = 0;
+	List remove_list = NULL;
 
 	if(!local_association_list)
 		return SLURM_SUCCESS;
@@ -1276,9 +1288,19 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				//rc = SLURM_ERROR;
 				break;
 			}
-			if (remove_assoc_notify)
-				remove_assoc_notify(rec);
-			list_delete_item(itr);
+			if(remove_assoc_notify) {
+				/* since there are some deadlock
+				   issues while inside our lock here
+				   we have to process a notify later 
+				*/
+				if(!remove_list)
+					remove_list = list_create(
+						destroy_acct_association_rec);
+				list_remove(itr);
+				list_append(remove_list, rec);
+			} else
+				list_delete_item(itr);
+
 			break;
 		default:
 			break;
@@ -1330,6 +1352,18 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 
 	list_iterator_destroy(itr);
 	slurm_mutex_unlock(&assoc_mgr_association_lock);
+	
+	/* This needs to happen outside of the
+	   assoc_mgr_association_lock */
+	if(remove_list) {
+		itr = list_iterator_create(remove_list);
+
+		while((rec = list_next(itr))) 
+			remove_assoc_notify(rec);
+		
+		list_iterator_destroy(itr);
+		list_destroy(remove_list);
+	}
 
 	return rc;	
 }
@@ -1620,7 +1654,7 @@ extern int assoc_mgr_validate_assoc_id(void *db_conn,
 			return SLURM_ERROR;
 
 	if((!local_association_list || !list_count(local_association_list))
-	   && !enforce) 
+	   && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) 
 		return SLURM_SUCCESS;
 	
 	slurm_mutex_lock(&assoc_mgr_association_lock);
@@ -1632,7 +1666,7 @@ extern int assoc_mgr_validate_assoc_id(void *db_conn,
 	list_iterator_destroy(itr);
 	slurm_mutex_unlock(&assoc_mgr_association_lock);
 
-	if(found_assoc || !enforce)
+	if(found_assoc || !(enforce & ACCOUNTING_ENFORCE_ASSOCS))
 		return SLURM_SUCCESS;
 
 	return SLURM_ERROR;
@@ -1836,6 +1870,8 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				break;
 			}
 			slurm_mutex_lock(&assoc_mgr_association_lock);
+			if(local_association_list)
+				list_destroy(local_association_list);
 			local_association_list = msg->my_list;
 			_post_association_list(local_association_list);
 			debug("Recovered %u associations", 
@@ -1854,6 +1890,8 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				break;
 			}
 			slurm_mutex_lock(&local_user_lock);
+			if(local_user_list)
+				list_destroy(local_user_list);
 			local_user_list = msg->my_list;
 			_post_user_list(local_user_list);
 			debug("Recovered %u users", 
@@ -1872,6 +1910,8 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				break;
 			}
 			slurm_mutex_lock(&local_qos_lock);
+			if(local_qos_list)
+				list_destroy(local_qos_list);
 			local_qos_list = msg->my_list;
 			debug("Recovered %u qos", 
 			      list_count(local_qos_list));
@@ -1889,6 +1929,8 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				break;
 			}
 			slurm_mutex_lock(&assoc_mgr_wckey_lock);
+			if(assoc_mgr_wckey_list)
+				list_destroy(assoc_mgr_wckey_list);
 			assoc_mgr_wckey_list = msg->my_list;
 			debug("Recovered %u wckeys", 
 			      list_count(assoc_mgr_wckey_list));
diff --git a/src/common/checkpoint.c b/src/common/checkpoint.c
index e3aa51938..aae9f9f84 100644
--- a/src/common/checkpoint.c
+++ b/src/common/checkpoint.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  checkpoint.c - implementation-independent checkpoint functions
- *  $Id: checkpoint.c 14208 2008-06-06 19:15:24Z da $
+ *  $Id: checkpoint.c 17005 2009-03-24 21:57:43Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -119,13 +119,14 @@ _slurm_checkpoint_context_create( const char *checkpoint_type )
 static int
 _slurm_checkpoint_context_destroy( slurm_checkpoint_context_t c )
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			 return SLURM_ERROR;
+			 rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -134,7 +135,7 @@ _slurm_checkpoint_context_destroy( slurm_checkpoint_context_t c )
 	xfree( c->checkpoint_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /*
diff --git a/src/common/env.c b/src/common/env.c
index a40516b36..2b00701c9 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -267,7 +267,6 @@ int setup_env(env_t *env)
 	int rc = SLURM_SUCCESS;
 	char *dist = NULL;
 	char *lllp_dist = NULL;
-	char *bgl_part_id = NULL;
 	char addrbuf[INET_ADDRSTRLEN];
 
 	if (env == NULL)
@@ -529,10 +528,22 @@ int setup_env(env_t *env)
 		rc = SLURM_FAILURE;
 	}
 
+#ifdef HAVE_BG
 	if(env->select_jobinfo) {
+		char *bgl_part_id = NULL;
 		select_g_get_jobinfo(env->select_jobinfo, 
 				     SELECT_DATA_BLOCK_ID, &bgl_part_id);
 		if (bgl_part_id) {
+#ifndef HAVE_BGL
+			uint16_t conn_type = (uint16_t)NO_VAL;
+			select_g_get_jobinfo(env->select_jobinfo, 
+					     SELECT_DATA_CONN_TYPE, &conn_type);
+			if(conn_type > SELECT_SMALL) {
+				if(setenvf(&env->env, 
+					   "SUBMIT_POOL", "%s", bgl_part_id))
+					rc = SLURM_FAILURE;
+			}
+#endif
 			if(setenvf(&env->env, 
 				   "MPIRUN_PARTITION", "%s", bgl_part_id))
 				rc = SLURM_FAILURE;
@@ -541,15 +552,17 @@ int setup_env(env_t *env)
 				rc = SLURM_FAILURE;
 			if(setenvf(&env->env, "MPIRUN_NOALLOCATE", "%d", 1))
 				rc = SLURM_FAILURE;
+			xfree(bgl_part_id);
 		} else 
 			rc = SLURM_FAILURE;
 		
 		if(rc == SLURM_FAILURE)
 			error("Can't set MPIRUN_PARTITION "
 			      "environment variable");
-		xfree(bgl_part_id);
+		
 	}
-	
+#endif
+
 	if (env->jobid >= 0
 	    && setenvf(&env->env, "SLURM_JOBID", "%d", env->jobid)) {
 		error("Unable to set SLURM_JOBID environment");
@@ -786,7 +799,7 @@ void
 env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 		  const job_desc_msg_t *desc)
 {
-	char *bgl_part_id = NULL, *tmp;
+	char *tmp = NULL;
 	slurm_step_layout_t *step_layout = NULL;
 	uint32_t num_tasks = desc->num_tasks;
 
@@ -807,16 +820,28 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 	env_array_overwrite(dest, "LOADLBATCH", "yes");
 #endif
 
+#ifdef HAVE_BG
 	/* BlueGene only */
 	select_g_get_jobinfo(alloc->select_jobinfo, SELECT_DATA_BLOCK_ID,
-			     &bgl_part_id);
-	if (bgl_part_id) {
+			     &tmp);
+	if (tmp) {
+#ifndef HAVE_BGL
+		uint16_t conn_type = (uint16_t)NO_VAL;
+		select_g_get_jobinfo(alloc->select_jobinfo, 
+				     SELECT_DATA_CONN_TYPE, &conn_type);
+		if(conn_type > SELECT_SMALL) {
+			env_array_overwrite_fmt(dest, "SUBMIT_POOL", "%s",
+						tmp);
+		}
+#endif
 		env_array_overwrite_fmt(dest, "MPIRUN_PARTITION", "%s",
-					bgl_part_id);
+					tmp);
 		env_array_overwrite_fmt(dest, "MPIRUN_NOFREE", "%d", 1);
 		env_array_overwrite_fmt(dest, "MPIRUN_NOALLOCATE", "%d", 1);
-	}
 
+		xfree(tmp);
+	}
+#endif
 	/* OBSOLETE, but needed by MPI, do not remove */
 	env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", alloc->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", alloc->node_cnt);
diff --git a/src/common/jobacct_common.c b/src/common/jobacct_common.c
index 8d8744dd9..45c131f03 100644
--- a/src/common/jobacct_common.c
+++ b/src/common/jobacct_common.c
@@ -176,8 +176,10 @@ extern void destroy_jobacct_job_rec(void *object)
 		xfree(job->jobname);
 		xfree(job->partition);
 		xfree(job->nodes);
-		if(job->steps)
+		if(job->steps) {
 			list_destroy(job->steps);
+			job->steps = NULL;
+		}
 		xfree(job->user);
 		xfree(job->wckey);
 		xfree(job);
@@ -558,7 +560,6 @@ extern void jobacct_common_free_jobacct(void *object)
 {
 	struct jobacctinfo *jobacct = (struct jobacctinfo *)object;
 	xfree(jobacct);
-	jobacct = NULL;
 }
 
 extern int jobacct_common_setinfo(struct jobacctinfo *jobacct, 
diff --git a/src/common/mpi.c b/src/common/mpi.c
index 3e71c8dc2..96eb18cdf 100644
--- a/src/common/mpi.c
+++ b/src/common/mpi.c
@@ -110,13 +110,15 @@ _slurm_mpi_context_create(const char *mpi_type)
 static int
 _slurm_mpi_context_destroy( slurm_mpi_context_t c )
 {
+	int rc = SLURM_SUCCESS;
+
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -125,7 +127,7 @@ _slurm_mpi_context_destroy( slurm_mpi_context_t c )
 	xfree(c->mpi_type);
 	xfree(c);
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /*
diff --git a/src/common/node_select.c b/src/common/node_select.c
index e4087f3ad..ebf6e78a7 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -9,7 +9,7 @@
  *  the plugin. This is because functions required by the plugin can not be 
  *  resolved on the front-end nodes, so we can't load the plugins there.
  *
- *  $Id: node_select.c 15717 2008-11-17 23:20:37Z da $
+ *  $Id: node_select.c 17005 2009-03-24 21:57:43Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -268,13 +268,14 @@ static slurm_select_context_t *_select_context_create(const char *select_type)
  */
 static int _select_context_destroy( slurm_select_context_t *c )
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -283,7 +284,7 @@ static int _select_context_destroy( slurm_select_context_t *c )
 	xfree( c->select_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /*
@@ -772,14 +773,33 @@ unpack_error:
 
 static char *_job_conn_type_string(uint16_t inx)
 {
-	if (inx == SELECT_TORUS)
+	switch(inx) {
+	case SELECT_TORUS:
 		return "torus";
-	else if (inx == SELECT_MESH)
+		break;
+	case SELECT_MESH:
 		return "mesh";
-	else if (inx == SELECT_SMALL)
+		break;
+	case SELECT_SMALL:
 		return "small";
-	else
+		break;
+#ifndef HAVE_BGL
+	case SELECT_HTC_S:
+		return "htc_s";
+		break;
+	case SELECT_HTC_D:
+		return "htc_d";
+		break;
+	case SELECT_HTC_V:
+		return "htc_v";
+		break;
+	case SELECT_HTC_L:
+		return "htc_l";
+		break;
+#endif
+	default: 
 		return "n/a";
+	}
 }
 
 static char *_yes_no_string(uint16_t inx)
diff --git a/src/common/parse_time.c b/src/common/parse_time.c
index beef4b602..38576d74b 100644
--- a/src/common/parse_time.c
+++ b/src/common/parse_time.c
@@ -592,3 +592,48 @@ extern void mins2time_str(uint32_t time, char *string, int size)
 				hours, minutes, seconds);
 	}
 }
+
+extern char *mon_abbr(int mon)
+{
+	switch(mon) {
+	case 0:
+		return "Ja";
+		break;
+	case 1:
+		return "Fe";
+		break;
+	case 2:
+		return "Ma";
+		break;
+	case 3:
+		return "Ap";
+		break;
+	case 4:
+		return "Ma";
+		break;
+	case 5:
+		return "Ju";
+		break;
+	case 6:
+		return "Jl";
+		break;
+	case 7:
+		return "Au";
+		break;
+	case 8:
+		return "Se";
+		break;
+	case 9:
+		return "Oc";
+		break;
+	case 10:
+		return "No";
+		break;
+	case 11:
+		return "De";
+		break;
+	default:
+		return "Un";
+		break;
+	}
+}
diff --git a/src/common/parse_time.h b/src/common/parse_time.h
index c891bd073..57a731668 100644
--- a/src/common/parse_time.h
+++ b/src/common/parse_time.h
@@ -96,4 +96,7 @@ extern int time_str2mins(char *string);
 extern void secs2time_str(time_t time, char *string, int size);
 extern void mins2time_str(uint32_t time, char *string, int size);
 
+/* used to get a 2 char abbriviated month name from int 0-11 */
+extern char *mon_abbr(int mon);
+
 #endif
diff --git a/src/common/proc_args.c b/src/common/proc_args.c
index ed6d9de03..1a690c2ba 100644
--- a/src/common/proc_args.c
+++ b/src/common/proc_args.c
@@ -125,6 +125,7 @@ task_dist_states_t verify_dist_type(const char *arg, uint32_t *plane_size)
  */
 int verify_conn_type(const char *arg)
 {
+#ifdef HAVE_BG
 	int len = strlen(arg);
 
 	if (!strncasecmp(arg, "MESH", len))
@@ -133,7 +134,18 @@ int verify_conn_type(const char *arg)
 		return SELECT_TORUS;
 	else if (!strncasecmp(arg, "NAV", len))
 		return SELECT_NAV;
-
+#ifndef HAVE_BGL
+	else if (!strncasecmp(arg, "HTC", len)
+		 || !strncasecmp(arg, "HTC_S", len))
+		return SELECT_HTC_S;
+	else if (!strncasecmp(arg, "HTC_D", len))
+		return SELECT_HTC_D;
+	else if (!strncasecmp(arg, "HTC_V", len))
+		return SELECT_HTC_V;
+	else if (!strncasecmp(arg, "HTC_L", len))
+		return SELECT_HTC_L;
+#endif
+#endif
 	error("invalid --conn-type argument %s ignored.", arg);
 	return NO_VAL;
 }
diff --git a/src/common/read_config.c b/src/common/read_config.c
index 309a64bd1..38f56f8ce 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -125,7 +125,7 @@ static void validate_and_set_defaults(slurm_ctl_conf_t *conf,
 				      s_p_hashtbl_t *hashtbl);
 
 s_p_options_t slurm_conf_options[] = {
-	{"AccountingStorageEnforce", S_P_UINT16},
+	{"AccountingStorageEnforce", S_P_STRING},
 	{"AccountingStorageHost", S_P_STRING},
 	{"AccountingStorageLoc", S_P_STRING},
 	{"AccountingStoragePass", S_P_STRING},
@@ -1764,6 +1764,10 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_string(&conf->mpi_default, "MpiDefault", hashtbl))
 		conf->mpi_default = xstrdup(DEFAULT_MPI_DEFAULT);
 
+	if(!s_p_get_boolean((bool *)&conf->track_wckey, 
+			    "TrackWCKey", hashtbl))
+		conf->track_wckey = false;
+
 	if (!s_p_get_string(&conf->accounting_storage_type,
 			    "AccountingStorageType", hashtbl)) {
 		if(default_storage_type)
@@ -1775,9 +1779,30 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 				xstrdup(DEFAULT_ACCOUNTING_STORAGE_TYPE);
 	}
 
-	if (!s_p_get_uint16(&conf->accounting_storage_enforce, 
-			    "AccountingStorageEnforce", hashtbl))
-		conf->accounting_storage_enforce = DEFAULT_ACCOUNTING_ENFORCE;
+	if (s_p_get_string(&temp_str, "AccountingStorageEnforce", hashtbl)) {
+		if (strstr(temp_str, "1") || strstr(temp_str, "associations"))
+			conf->accounting_storage_enforce 
+				|= ACCOUNTING_ENFORCE_ASSOCS;
+		if (strstr(temp_str, "2") || strstr(temp_str, "limits")) {
+			conf->accounting_storage_enforce 
+				|= ACCOUNTING_ENFORCE_ASSOCS;
+			conf->accounting_storage_enforce 
+				|= ACCOUNTING_ENFORCE_LIMITS;
+		}
+		if (strstr(temp_str, "wckeys")) {
+			conf->accounting_storage_enforce 
+				|= ACCOUNTING_ENFORCE_ASSOCS;
+			conf->accounting_storage_enforce 
+				|= ACCOUNTING_ENFORCE_WCKEYS;
+			conf->track_wckey = true;
+		}		
+		if (strstr(temp_str, "all")) {
+			conf->accounting_storage_enforce = 0xffff;
+			conf->track_wckey = true;
+		}		
+			
+		xfree(temp_str);
+	}
 
 	if (!s_p_get_string(&conf->accounting_storage_host,
 			    "AccountingStorageHost", hashtbl)) {
@@ -2040,10 +2065,6 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint16(&conf->wait_time, "WaitTime", hashtbl))
 		conf->wait_time = DEFAULT_WAIT_TIME;
 	
-	if(!s_p_get_boolean((bool *)&conf->track_wckey, 
-			    "TrackWCKey", hashtbl))
-		conf->track_wckey = false;
-
 	if (s_p_get_uint16(&conf->tree_width, "TreeWidth", hashtbl)) {
 		if (conf->tree_width == 0) {
 			error("TreeWidth=0 is invalid");
diff --git a/src/common/read_config.h b/src/common/read_config.h
index 9ee1f9153..b70006654 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -50,13 +50,11 @@ extern char *default_slurm_config_file;
 extern char *default_plugin_path;
 extern char *default_plugstack;
 
-enum {
-	ACCOUNTING_ENFORCE_NONE,
-	ACCOUNTING_ENFORCE_YES,
-	ACCOUNTING_ENFORCE_WITH_LIMITS
-};
+#define ACCOUNTING_ENFORCE_ASSOCS 0x0001
+#define ACCOUNTING_ENFORCE_LIMITS 0x0002
+#define ACCOUNTING_ENFORCE_WCKEYS 0x0004
 
-#define DEFAULT_ACCOUNTING_ENFORCE  ACCOUNTING_ENFORCE_NONE
+#define DEFAULT_ACCOUNTING_ENFORCE  0
 #define DEFAULT_ACCOUNTING_STORAGE_TYPE "accounting_storage/none"
 #define DEFAULT_AUTH_TYPE          "auth/none"
 #define DEFAULT_BATCH_START_TIMEOUT 10
diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c
index 166d84a52..98a50322b 100644
--- a/src/common/slurm_accounting_storage.c
+++ b/src/common/slurm_accounting_storage.c
@@ -326,13 +326,14 @@ static slurm_acct_storage_context_t *_acct_storage_context_create(
  */
 static int _acct_storage_context_destroy(slurm_acct_storage_context_t *c)
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -341,7 +342,7 @@ static int _acct_storage_context_destroy(slurm_acct_storage_context_t *c)
 	xfree( c->acct_storage_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /* 
diff --git a/src/common/slurm_accounting_storage.h b/src/common/slurm_accounting_storage.h
index 99be52ec3..af73d80e5 100644
--- a/src/common/slurm_accounting_storage.h
+++ b/src/common/slurm_accounting_storage.h
@@ -845,7 +845,7 @@ extern List acct_storage_g_get_accounts(void *db_conn,  uint32_t uid,
  * note List needs to be freed when called
  */
 extern List acct_storage_g_get_clusters(
-	void *db_conn,  uint32_t uid, acct_cluster_cond_t *cluster_cond);
+	void *db_conn, uint32_t uid, acct_cluster_cond_t *cluster_cond);
 
 /* 
  * get info from the storage 
diff --git a/src/common/slurm_auth.c b/src/common/slurm_auth.c
index 3df7e6d0e..11d4b7f5e 100644
--- a/src/common/slurm_auth.c
+++ b/src/common/slurm_auth.c
@@ -280,13 +280,15 @@ slurm_auth_generic_errstr( int slurm_errno )
 static int
 _slurm_auth_context_destroy( slurm_auth_context_t c )
 {    
+	int rc = SLURM_SUCCESS;
+
         /*
          * Must check return code here because plugins might still
          * be loaded and active.
          */
         if ( c->plugin_list ) {
                 if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-                        return SLURM_ERROR;
+                        rc =  SLURM_ERROR;
                 }
         } else {
 		plugin_unload(c->cur_plugin);
@@ -295,7 +297,7 @@ _slurm_auth_context_destroy( slurm_auth_context_t c )
         xfree( c->auth_type );
         xfree( c );
         
-        return SLURM_SUCCESS;
+        return rc;
 }
 
 int inline
diff --git a/src/common/slurm_cred.c b/src/common/slurm_cred.c
index 530871494..8163a9d12 100644
--- a/src/common/slurm_cred.c
+++ b/src/common/slurm_cred.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  src/common/slurm_cred.c - SLURM job credential functions
- *  $Id: slurm_cred.c 15765 2008-11-25 01:07:17Z jette $
+ *  $Id: slurm_cred.c 17005 2009-03-24 21:57:43Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -274,13 +274,14 @@ _slurm_crypto_context_create( const char *crypto_type)
 static int
 _slurm_crypto_context_destroy( slurm_crypto_context_t *c )
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			 return SLURM_ERROR;
+			 rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -289,7 +290,7 @@ _slurm_crypto_context_destroy( slurm_crypto_context_t *c )
 	xfree( c->crypto_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /*
diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index 0cdab103d..e04bcfbd7 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -212,6 +212,8 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "Memory required by task is not available"		},
 	{ ESLURM_INVALID_ACCOUNT,
 	  "Job has invalid account"				},
+	{ ESLURM_INVALID_WCKEY,
+	  "Job has invalid wckey"				},
 	{ ESLURM_INVALID_LICENSES,
 	  "Job has invalid license specification"		},
 	{ ESLURM_NEED_RESTART,
diff --git a/src/common/slurm_jobacct_gather.c b/src/common/slurm_jobacct_gather.c
index aff50ffcf..834c22838 100644
--- a/src/common/slurm_jobacct_gather.c
+++ b/src/common/slurm_jobacct_gather.c
@@ -142,13 +142,14 @@ _slurm_jobacct_gather_context_create( const char *jobacct_gather_type)
 static int
 _slurm_jobacct_gather_context_destroy( slurm_jobacct_gather_context_t *c )
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			 return SLURM_ERROR;
+			 rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -157,7 +158,7 @@ _slurm_jobacct_gather_context_destroy( slurm_jobacct_gather_context_t *c )
 	xfree( c->jobacct_gather_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /*
diff --git a/src/common/slurm_jobcomp.c b/src/common/slurm_jobcomp.c
index b308e61d4..b9ed9d158 100644
--- a/src/common/slurm_jobcomp.c
+++ b/src/common/slurm_jobcomp.c
@@ -118,13 +118,14 @@ _slurm_jobcomp_context_create( const char *jobcomp_type)
 static int
 _slurm_jobcomp_context_destroy( slurm_jobcomp_context_t c )
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			 return SLURM_ERROR;
+			 rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -133,7 +134,7 @@ _slurm_jobcomp_context_destroy( slurm_jobcomp_context_t c )
 	xfree( c->jobcomp_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /*
diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c
index 65d1f071e..3bf706dba 100644
--- a/src/common/slurm_protocol_api.c
+++ b/src/common/slurm_protocol_api.c
@@ -678,7 +678,7 @@ int slurm_get_is_association_based_accounting(void)
 		conf = slurm_conf_lock();
 		if(!strcasecmp(conf->accounting_storage_type, 
 			      "accounting_storage/slurmdbd")
-		   || strcasecmp(conf->accounting_storage_type,
+		   || !strcasecmp(conf->accounting_storage_type,
 				 "accounting_storage/mysql")) 
 			enforce = 1;
 		slurm_conf_unlock();
diff --git a/src/common/slurm_protocol_socket_implementation.c b/src/common/slurm_protocol_socket_implementation.c
index 981955391..bf1f3190a 100644
--- a/src/common/slurm_protocol_socket_implementation.c
+++ b/src/common/slurm_protocol_socket_implementation.c
@@ -77,7 +77,7 @@
 #include "src/common/xmalloc.h"
 #include "src/common/util-net.h"
 
-#define PORT_RETRIES    2
+#define PORT_RETRIES    3
 #define MIN_USER_PORT   (IPPORT_RESERVED + 1)
 #define MAX_USER_PORT   0xffff
 #define RANDOM_USER_PORT ((uint16_t) ((lrand48() % \
@@ -525,7 +525,7 @@ slurm_fd _slurm_open_stream(slurm_addr *addr, bool retry)
 				    sizeof(*addr));
 		if (rc >= 0)		    /* success */
 			break;
-		if ((errno != ECONNREFUSED) || 
+		if (((errno != ECONNREFUSED) && (errno != ETIMEDOUT)) ||
 		    (!retry) || (retry_cnt >= PORT_RETRIES)) {
 			slurm_seterrno(errno);
 			goto error;
diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c
index 634f0737f..33e0e5819 100644
--- a/src/common/slurmdbd_defs.c
+++ b/src/common/slurmdbd_defs.c
@@ -145,10 +145,13 @@ extern int slurm_open_slurmdbd_conn(char *auth_info, bool make_agent,
 		tmp_errno = errno;
 	}
 	slurm_mutex_unlock(&slurmdbd_lock);
-
+	
 	slurm_mutex_lock(&agent_lock);
 	if (make_agent && ((agent_tid == 0) || (agent_list == NULL)))
 		_create_agent();
+	else if(agent_list)
+		_load_dbd_state();
+
 	slurm_mutex_unlock(&agent_lock);
 	
 	if(tmp_errno) {
@@ -203,7 +206,8 @@ extern int slurm_send_slurmdbd_recv_rc_msg(uint16_t rpc_version,
 	} else {	/* resp->msg_type == DBD_RC */
 		dbd_rc_msg_t *msg = resp->data;
 		*resp_code = msg->return_code;
-		if(msg->return_code != SLURM_SUCCESS)
+		if(msg->return_code != SLURM_SUCCESS
+		   && msg->return_code != ACCOUNTING_FIRST_REG)
 			error("slurmdbd(%d): from %u: %s", msg->return_code, 
 			      msg->sent_type, msg->comment);
 		slurmdbd_free_rc_msg(rpc_version, msg);
@@ -244,9 +248,10 @@ extern int slurm_send_recv_slurmdbd_msg(uint16_t rpc_version,
 	rc = _send_msg(buffer);
 	free_buf(buffer);
 	if (rc != SLURM_SUCCESS) {
-		error("slurmdbd: Sending message type %u", req->msg_type);
+		error("slurmdbd: Sending message type %u: %d: %m",
+		      req->msg_type, rc);
 		slurm_mutex_unlock(&slurmdbd_lock);
-		return SLURM_ERROR;
+		return rc;
 	}
 
 	buffer = _recv_msg(read_timeout);
@@ -1190,7 +1195,7 @@ static int _send_init_msg()
 	rc = _send_msg(buffer);
 	free_buf(buffer);
 	if (rc != SLURM_SUCCESS) {
-		error("slurmdbd: Sending DBD_INIT message");
+		error("slurmdbd: Sending DBD_INIT message: %d: %m", rc);
 		return rc;
 	}
 
@@ -1519,6 +1524,9 @@ static int _fd_writeable(slurm_fd fd)
  ****************************************************************************/
 static void _create_agent(void)
 {
+	/* this needs to be set because the agent thread will do
+	   nothing if the connection was closed and then opened again */
+	agent_shutdown = 0;
 	if (agent_list == NULL) {
 		agent_list = list_create(_agent_queue_del);
 		if (agent_list == NULL)
@@ -1649,7 +1657,7 @@ static void *_agent(void *x)
 				slurm_mutex_unlock(&slurmdbd_lock);
 				break;
 			}
-			error("slurmdbd: Failure sending message");
+			error("slurmdbd: Failure sending message: %d: %m", rc);
 		} else {
 			rc = _get_return_code(SLURMDBD_VERSION, read_timeout);
 			if (rc == EAGAIN) {
@@ -1658,7 +1666,7 @@ static void *_agent(void *x)
 					break;
 				}
 				error("slurmdbd: Failure with "
-				      "message need to resend");
+				      "message need to resend: %d: %m", rc);
 			}
 		}
 		slurm_mutex_unlock(&slurmdbd_lock);
@@ -1741,12 +1749,18 @@ static void _load_dbd_state(void)
 	char *dbd_fname;
 	Buf buffer;
 	int fd, recovered = 0;
-
+	
 	dbd_fname = slurm_get_state_save_location();
 	xstrcat(dbd_fname, "/dbd.messages");
 	fd = open(dbd_fname, O_RDONLY);
 	if (fd < 0) {
-		error("slurmdbd: Opening state save file %s", dbd_fname);
+		/* don't print an error message if there is no file */
+		if(errno == ENOENT)
+			debug4("slurmdbd: There is no state save file to "
+			       "open by name %s", dbd_fname);
+		else
+			error("slurmdbd: Opening state save file %s: %m",
+			      dbd_fname);
 	} else {
 		while (1) {
 			buffer = _load_dbd_rec(fd);
@@ -1756,8 +1770,7 @@ static void _load_dbd_state(void)
 				fatal("slurmdbd: list_enqueue, no memory");
 			recovered++;
 		}
-	}
-	if (fd >= 0) {
+	
 		verbose("slurmdbd: recovered %d pending RPCs", recovered);
 		(void) close(fd);
 		(void) unlink(dbd_fname);	/* clear save state */
diff --git a/src/common/switch.c b/src/common/switch.c
index 0a111f9a5..f7ea41c0c 100644
--- a/src/common/switch.c
+++ b/src/common/switch.c
@@ -160,13 +160,14 @@ _slurm_switch_context_create(const char *switch_type)
 static int
 _slurm_switch_context_destroy( slurm_switch_context_t c )
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -175,7 +176,7 @@ _slurm_switch_context_destroy( slurm_switch_context_t c )
 	xfree( c->switch_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /*
diff --git a/src/common/uid.c b/src/common/uid.c
index 5418fd837..ad47db4c1 100644
--- a/src/common/uid.c
+++ b/src/common/uid.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  * src/common/uid.c - uid/gid lookup utility functions
- * $Id: uid.c 14795 2008-08-15 21:54:22Z jette $
+ * $Id: uid.c 17177 2009-04-07 18:09:43Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -110,6 +110,24 @@ uid_to_string (uid_t uid)
 	return ustring;
 }
 
+gid_t
+gid_from_uid (uid_t uid)
+{
+	struct passwd pwd, *result;
+	char buffer[PW_BUF_SIZE];
+	gid_t gid;
+	int rc;
+
+	rc = getpwuid_r(uid, &pwd, buffer, PW_BUF_SIZE, &result);
+	if (result == NULL) {
+		gid = (gid_t) -1;
+	} else {
+		gid = result->pw_gid;
+	}
+
+	return gid;
+}
+
 gid_t
 gid_from_string (char *name)
 {
diff --git a/src/common/uid.h b/src/common/uid.h
index 9ded73457..06701af57 100644
--- a/src/common/uid.h
+++ b/src/common/uid.h
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  * src/common/uid.h - uid/gid lookup utility functions
- * $Id: uid.h 14795 2008-08-15 21:54:22Z jette $
+ * $Id: uid.h 17177 2009-04-07 18:09:43Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -59,6 +59,12 @@
  */
 uid_t uid_from_string (char *name);
 
+/*
+ * Return the primary group id for a given user id, or 
+ * (gid_t) -1 on failure.
+ */
+gid_t gid_from_uid (uid_t uid);
+
 /*
  * Same as uid_from_name(), but for group name/id.
  */
diff --git a/src/database/mysql_common.c b/src/database/mysql_common.c
index b0a76260d..9e6a3b9f2 100644
--- a/src/database/mysql_common.c
+++ b/src/database/mysql_common.c
@@ -66,7 +66,12 @@ static int _clear_results(MYSQL *mysql_db)
 			      mysql_errno(mysql_db),
 			      mysql_error(mysql_db));
 	} while (rc == 0);
-	
+
+	if(rc > 0) {
+		errno = rc;
+		return SLURM_ERROR;
+	} 
+
 	return SLURM_SUCCESS;
 }
 
@@ -112,6 +117,7 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 				     storage_field_t *fields, char *ending)
 {
 	char *query = NULL;
+	char *correct_query = NULL;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	int i = 0;
@@ -162,6 +168,7 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 
 	itr = list_iterator_create(columns);
 	query = xstrdup_printf("alter table %s", table_name);
+	correct_query = xstrdup_printf("alter table %s", table_name);
 	START_TIMER;
 	while(fields[i].name) {
 		int found = 0;
@@ -171,6 +178,9 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 				xstrfmtcat(query, " modify %s %s,",
 					   fields[i].name,
 					   fields[i].options);
+				xstrfmtcat(correct_query, " modify %s %s,",
+					   fields[i].name,
+					   fields[i].options);
 				list_delete_item(itr);
 				found = 1;
 				break;
@@ -186,6 +196,9 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 					   fields[i].name,
 					   fields[i].options,
 					   fields[i-1].name);
+				xstrfmtcat(correct_query, " modify %s %s,",
+					   fields[i].name,
+					   fields[i].options);
 			} else {
 				info("adding column %s at the beginning "
 				     "of table %s",
@@ -195,6 +208,9 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 				xstrfmtcat(query, " add %s %s first,",
 					   fields[i].name,
 					   fields[i].options);
+				xstrfmtcat(correct_query, " modify %s %s,",
+					   fields[i].name,
+					   fields[i].options);
 			}
 			adding = 1;
 		}
@@ -228,9 +244,13 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 		if(temp[end]) {
 			end++;
 			primary_key = xstrndup(temp, end);
-			if(old_primary)
+			if(old_primary) {
 				xstrcat(query, " drop primary key,");
+				xstrcat(correct_query, " drop primary key,");
+			}
 			xstrfmtcat(query, " add %s,",  primary_key);
+			xstrfmtcat(correct_query, " add %s,",  primary_key);
+			
 			xfree(primary_key);
 		}
 	}
@@ -251,16 +271,21 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 		if(temp[end]) {
 			end++;
 			unique_index = xstrndup(temp, end);
-			if(old_index)
+			if(old_index) {
 				xstrfmtcat(query, " drop index %s,",
 					   old_index);
+				xstrfmtcat(correct_query, " drop index %s,",
+					   old_index);
+			}
 			xstrfmtcat(query, " add %s,", unique_index);
+			xstrfmtcat(correct_query, " add %s,", unique_index);
 			xfree(unique_index);
 		}
 	}
 	xfree(old_index);
 
 	query[strlen(query)-1] = ';';
+	correct_query[strlen(correct_query)-1] = ';';
 	//info("%d query\n%s", __LINE__, query);
 
 	/* see if we have already done this definition */
@@ -287,6 +312,7 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 		char *query2 = NULL;
 	
 		debug("Table %s has changed.  Updating...", table_name);
+
 		if(mysql_db_query(mysql_db, query)) {
 			xfree(query);
 			return SLURM_ERROR;
@@ -298,15 +324,17 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 					"on duplicate key update "
 					"definition=\"%s\", mod_time=%d;",
 					table_defs_table, now, now,
-					table_name, query, query, now);
+					table_name, correct_query,
+					correct_query, now);
 		if(mysql_db_query(mysql_db, query2)) {
 			xfree(query2);
 			return SLURM_ERROR;
 		}
 		xfree(query2);
 	}
-	
+
 	xfree(query);
+	xfree(correct_query);
 	query = xstrdup_printf("make table current %s", table_name);
 	END_TIMER2(query);
 	xfree(query);
@@ -377,17 +405,18 @@ extern int mysql_get_db_connection(MYSQL **mysql_db, char *db_name,
 {
 	int rc = SLURM_SUCCESS;
 	bool storage_init = false;
-
+	
 	if(!(*mysql_db = mysql_init(*mysql_db)))
 		fatal("mysql_init failed: %s", mysql_error(*mysql_db));
 	else {
+		unsigned int my_timeout = 30;
 #ifdef MYSQL_OPT_RECONNECT
-{
 		my_bool reconnect = 1;
 		/* make sure reconnect is on */
 		mysql_options(*mysql_db, MYSQL_OPT_RECONNECT, &reconnect);
-}
 #endif
+		mysql_options(*mysql_db, MYSQL_OPT_CONNECT_TIMEOUT,
+			      (char *)&my_timeout);
 		while(!storage_init) {
 			if(!mysql_real_connect(*mysql_db, db_info->host,
 					       db_info->user, db_info->pass,
@@ -544,6 +573,16 @@ extern MYSQL_RES *mysql_db_query_ret(MYSQL *mysql_db, char *query, bool last)
 	return result;
 }
 
+extern int mysql_db_query_check_after(MYSQL *mysql_db, char *query)
+{
+	int rc = SLURM_SUCCESS;
+		
+	if((rc = mysql_db_query(mysql_db, query)) != SLURM_ERROR)  
+		rc = _clear_results(mysql_db);
+	
+	return rc;
+}
+
 extern int mysql_insert_ret_id(MYSQL *mysql_db, char *query)
 {
 	int new_id = 0;
diff --git a/src/database/mysql_common.h b/src/database/mysql_common.h
index c8fcb76a1..b6aba93e9 100644
--- a/src/database/mysql_common.h
+++ b/src/database/mysql_common.h
@@ -95,6 +95,7 @@ extern int mysql_db_commit(MYSQL *mysql_db);
 extern int mysql_db_rollback(MYSQL *mysql_db);
 
 extern MYSQL_RES *mysql_db_query_ret(MYSQL *mysql_db, char *query, bool last);
+extern int mysql_db_query_check_after(MYSQL *mysql_db, char *query);
 
 extern int mysql_insert_ret_id(MYSQL *mysql_db, char *query);
 
diff --git a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
index 4290bb408..621f93008 100644
--- a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
+++ b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
@@ -488,8 +488,8 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 {
 	int	i,
 		rc=SLURM_SUCCESS;
-	char	buf[BUFFER_SIZE], *jname, *account, *nodes;
-	char    *wckey = NULL;
+	char	buf[BUFFER_SIZE], *account, *nodes;
+	char    *jname = NULL, *wckey = NULL;
 	long	priority;
 	int track_steps = 0;
 
@@ -509,7 +509,7 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 	}
 
 	priority = (job_ptr->priority == NO_VAL) ?
-		-1L : (long) job_ptr->priority;
+		   -1L : (long) job_ptr->priority;
 
 	if (job_ptr->name && job_ptr->name[0]) {
 		char *temp = NULL;
diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
index 907d936d1..cc4cc6d0d 100644
--- a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
+++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
@@ -640,23 +640,6 @@ static filetxt_job_rec_t *_find_job_record(List job_list,
 	return job;
 }
 
-static int _remove_job_record(List job_list, uint32_t jobnum)
-{
-	filetxt_job_rec_t *job = NULL;
-	int rc = SLURM_ERROR;
-	ListIterator itr = list_iterator_create(job_list);
-
-	while((job = (filetxt_job_rec_t *)list_next(itr)) != NULL) {
-		if (job->header.jobnum == jobnum) {
-			list_remove(itr);
-			_destroy_filetxt_job_rec(job);
-			rc = SLURM_SUCCESS;
-		}
-	}
-	list_iterator_destroy(itr);
-	return rc;
-}
-
 static filetxt_step_rec_t *_find_step_record(filetxt_job_rec_t *job,
 					     long stepnum)
 {
@@ -704,9 +687,12 @@ static int _parse_line(char *f[], void **data, int len)
 		(*job)->priority = atoi(f[F_PRIORITY]);
 		(*job)->ncpus = atoi(f[F_NCPUS]);
 		(*job)->nodes = xstrdup(f[F_NODES]);
+
 		for (i=0; (*job)->nodes[i]; i++) { /* discard trailing <CR> */
-			if (isspace((*job)->nodes[i]))
+			if (isspace((*job)->nodes[i])) {
 				(*job)->nodes[i] = '\0';
+				info("got here");
+			}
 		}
 		if (!strcmp((*job)->nodes, "(null)")) {
 			xfree((*job)->nodes);
@@ -814,7 +800,7 @@ static int _parse_line(char *f[], void **data, int len)
 			(*job)->exitcode = atoi(f[F_JOB_EXITCODE]);
 		break;
 	default:
-		printf("UNKOWN TYPE %d",i);
+		error("UNKOWN TYPE %d",i);
 		break;
 	}
 	return SLURM_SUCCESS;
@@ -828,19 +814,21 @@ static void _process_start(List job_list, char *f[], int lc,
 
 	_parse_line(f, (void **)&temp, len);
 	job = _find_job_record(job_list, temp->header, JOB_START);
-	if (job) {	/* Hmmm... that's odd */
-		printf("job->header.job_submit = %d",
-		       (int)job->header.job_submit);
-		if(job->header.job_submit == 0)
-			_remove_job_record(job_list, job->header.jobnum);
-		else {
-			fprintf(stderr,
-				"Conflicting JOB_START for job %u at"
-				" line %d -- ignoring it\n",
-				job->header.jobnum, lc);
-			_destroy_filetxt_job_rec(temp);
-			return;
-		}
+	if (job) { 
+		/* in slurm we can get 2 start records one for submit
+		 * and one for start, so look at the last one */
+		xfree(job->jobname);
+		job->jobname = xstrdup(temp->jobname);
+		job->track_steps = temp->track_steps;
+		job->priority = temp->priority;
+		job->ncpus = temp->ncpus;
+		xfree(job->nodes);
+		job->nodes = xstrdup(temp->nodes);
+		xfree(job->account);
+		job->account = xstrdup(temp->account);
+
+		_destroy_filetxt_job_rec(temp);
+		return;
 	}
 	
 	job = temp;
@@ -861,7 +849,7 @@ static void _process_step(List job_list, char *f[], int lc,
 	_parse_line(f, (void **)&temp, len);
 	
 	job = _find_job_record(job_list, temp->header, JOB_STEP);
-	
+
 	if (temp->stepnum == -2) {
 		_destroy_filetxt_step_rec(temp);
 		return;
@@ -936,8 +924,7 @@ static void _process_step(List job_list, char *f[], int lc,
 		job->nodes = xstrdup(step->nodes);
 	}
 	
-got_step:
-	
+got_step:	
 		
 	if (job->job_terminated_seen == 0) {	/* If the job is still running,
 						   this is the most recent
@@ -1053,6 +1040,8 @@ extern List filetxt_jobacct_process_get_jobs(acct_job_cond_t *job_cond)
 	if(job_cond) {
 		fdump_flag = job_cond->duplicates & FDUMP_FLAG;
 		job_cond->duplicates &= (~FDUMP_FLAG);
+		if(!job_cond->duplicates) 
+			itr2 = list_iterator_create(ret_job_list);
 	}
 
 	fd = _open_log_file(filein);
@@ -1169,21 +1158,21 @@ extern List filetxt_jobacct_process_get_jobs(acct_job_cond_t *job_cond)
 		switch(rec_type) {
 		case JOB_START:
 			if(i < F_JOB_ACCOUNT) {
-				printf("Bad data on a Job Start\n");
+				error("Bad data on a Job Start\n");
 				_show_rec(f);
 			} else 
 				_process_start(job_list, f, lc, show_full, i);
 			break;
 		case JOB_STEP:
 			if(i < F_MAX_VSIZE) {
-				printf("Bad data on a Step entry\n");
+				error("Bad data on a Step entry\n");
 				_show_rec(f);
 			} else
 				_process_step(job_list, f, lc, show_full, i);
 			break;
 		case JOB_SUSPEND:
 			if(i < F_JOB_REQUID) {
-				printf("Bad data on a Suspend entry\n");
+				error("Bad data on a Suspend entry\n");
 				_show_rec(f);
 			} else
 				_process_suspend(job_list, f, lc,
@@ -1191,7 +1180,7 @@ extern List filetxt_jobacct_process_get_jobs(acct_job_cond_t *job_cond)
 			break;
 		case JOB_TERMINATED:
 			if(i < F_JOB_REQUID) {
-				printf("Bad data on a Job Term\n");
+				error("Bad data on a Job Term\n");
 				_show_rec(f);
 			} else
 				_process_terminated(job_list, f, lc,
@@ -1211,30 +1200,28 @@ extern List filetxt_jobacct_process_get_jobs(acct_job_cond_t *job_cond)
 	fclose(fd);
 
 	itr = list_iterator_create(job_list);
-	if(!job_cond->duplicates)
-		itr2 = list_iterator_create(ret_job_list);
+	
 	while((filetxt_job = list_next(itr))) {
 		jobacct_job_rec_t *jobacct_job = 
 			_create_jobacct_job_rec(filetxt_job, job_cond);
 		if(jobacct_job) {
 			jobacct_job_rec_t *curr_job = NULL;
-			if(job_cond && !job_cond->duplicates) {
+			if(itr2) {
+				list_iterator_reset(itr2);
 				while((curr_job = list_next(itr2))) {
 					if (curr_job->jobid == 
 					    jobacct_job->jobid) {
 						list_delete_item(itr2);
+						info("removing job %d", jobacct_job->jobid);
 						break;
 					}
 				}
 			}
 			list_append(ret_job_list, jobacct_job);
-			
-			if(!job_cond->duplicates)
-				list_iterator_reset(itr2);
 		}
 	}
 
-	if(!job_cond->duplicates)
+	if(itr2)
 		list_iterator_destroy(itr2);
 
 	list_iterator_destroy(itr);
@@ -1302,12 +1289,12 @@ extern int filetxt_jobacct_process_archive(acct_archive_cond_t *arch_cond)
 		goto finished;
 	}
 	if ((statbuf.st_mode & S_IFLNK) == S_IFLNK) {
-		fprintf(stderr, "%s is a symbolic link; --expire requires "
+		error("%s is a symbolic link; --expire requires "
 			"a hard-linked file name\n", filein);
 		goto finished;
 	}
 	if (!(statbuf.st_mode & S_IFREG)) {
-		fprintf(stderr, "%s is not a regular file; --expire "
+		error("%s is not a regular file; --expire "
 			"only works on accounting log files\n",
 			filein);
 		goto finished;
@@ -1324,7 +1311,7 @@ extern int filetxt_jobacct_process_archive(acct_archive_cond_t *arch_cond)
 			goto finished;
 		}
 	} else {
-		fprintf(stderr, "Warning! %s exists -- please remove "
+		error("Warning! %s exists -- please remove "
 			"or rename it before proceeding\n",
 			old_logfile_name);
 		goto finished;
@@ -1386,7 +1373,7 @@ extern int filetxt_jobacct_process_archive(acct_archive_cond_t *arch_cond)
 	sprintf(logfile_name, "%s.expired", filein);
 	new_file = stat(logfile_name, &statbuf);
 	if ((expired_logfile = fopen(logfile_name, "a"))==NULL) {
-		fprintf(stderr, "Error while opening %s", 
+		error("Error while opening %s", 
 			logfile_name);
 		perror("");
 		xfree(logfile_name);
@@ -1401,7 +1388,7 @@ extern int filetxt_jobacct_process_archive(acct_archive_cond_t *arch_cond)
 
 	logfile_name = _prefix_filename(filein, ".new.");
 	if ((new_logfile = fopen(logfile_name, "w"))==NULL) {
-		fprintf(stderr, "Error while opening %s",
+		error("Error while opening %s",
 			logfile_name);
 		perror("");
 		fclose(expired_logfile);
@@ -1421,16 +1408,16 @@ extern int filetxt_jobacct_process_archive(acct_archive_cond_t *arch_cond)
 	list_sort(keep_list, (ListCmpF) _cmp_jrec);
 	
 	/* if (params->opt_verbose > 2) { */
-/* 		fprintf(stderr, "--- contents of exp_list ---"); */
+/* 		error("--- contents of exp_list ---"); */
 /* 		itr = list_iterator_create(exp_list); */
 /* 		while((exp_rec = list_next(itr))) { */
 /* 			if (!(i%5)) */
-/* 				fprintf(stderr, "\n"); */
+/* 				error("\n"); */
 /* 			else */
-/* 				fprintf(stderr, "\t"); */
-/* 			fprintf(stderr, "%d", exp_rec->job); */
+/* 				error("\t"); */
+/* 			error("%d", exp_rec->job); */
 /* 		} */
-/* 		fprintf(stderr, "\n---- end of exp_list ---\n"); */
+/* 		error("\n---- end of exp_list ---\n"); */
 /* 		list_iterator_destroy(itr); */
 /* 	} */
 	/* write the expired file */
@@ -1506,10 +1493,10 @@ extern int filetxt_jobacct_process_archive(acct_archive_cond_t *arch_cond)
 		perror("renaming new logfile");
 		/* undo it? */
 		if (!rename(old_logfile_name, filein)) 
-			fprintf(stderr, "Please correct the problem "
+			error("Please correct the problem "
 				"and try again");
 		else
-			fprintf(stderr, "SEVERE ERROR: Current accounting "
+			error("SEVERE ERROR: Current accounting "
 				"log may have been renamed %s;\n"
 				"please rename it to \"%s\" if necessary, "
 			        "and try again\n",
@@ -1522,7 +1509,7 @@ extern int filetxt_jobacct_process_archive(acct_archive_cond_t *arch_cond)
 	file_err = slurm_reconfigure();
 	if (file_err) {
 		file_err = 1;
-		fprintf(stderr, "Error: Attempt to reconfigure "
+		error("Error: Attempt to reconfigure "
 			"SLURM failed.\n");
 		if (rename(old_logfile_name, filein)) {
 			perror("renaming logfile from .old.");
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index 8b223cc13..9820488c4 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -163,6 +163,41 @@ extern List acct_storage_p_remove_wckeys(mysql_conn_t *mysql_conn,
 					 uint32_t uid, 
 					 acct_wckey_cond_t *wckey_cond);
 
+static char *_get_cluster_from_associd(mysql_conn_t *mysql_conn,
+				       uint32_t associd)
+{
+	char *cluster = NULL;
+	char *query = NULL;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	
+	/* Just so we don't have to keep a
+	   cache of the associations around we
+	   will just query the db for the cluster
+	   name of the association id.  Since
+	   this should sort of be a rare case
+	   this isn't too bad.
+	*/
+	query = xstrdup_printf("select cluster from %s where id=%u",
+			       assoc_table, associd);
+
+	debug4("%d(%d) query\n%s",
+	       mysql_conn->conn, __LINE__, query);
+	if(!(result = 
+	     mysql_db_query_ret(mysql_conn->db_conn, query, 0))) {
+		xfree(query);
+		return NULL;
+	}
+	xfree(query);
+	
+	if((row = mysql_fetch_row(result)))
+		cluster = xstrdup(row[0]);
+
+	mysql_free_result(result);
+	
+	return cluster;
+}
+
 static char *_get_user_from_associd(mysql_conn_t *mysql_conn, uint32_t associd)
 {
 	char *user = NULL;
@@ -1131,7 +1166,7 @@ static int _setup_association_cond_limits(acct_association_cond_t *assoc_cond,
 	} else if(assoc_cond->user_list) {
 		/* we want all the users, but no non-user associations */
 		set = 1;
-		xstrfmtcat(*extra, " && (%s.user!='')", prefix);		
+		xstrfmtcat(*extra, " && (%s.user!='')", prefix);
 	}
 
 	if(assoc_cond->partition_list 
@@ -2359,6 +2394,224 @@ static int _get_db_index(MYSQL *db_conn,
 	return db_index;
 }
 
+/* checks should already be done before this to see if this is a valid
+   user or not.
+*/
+static int _get_usage_for_list(mysql_conn_t *mysql_conn,
+			    slurmdbd_msg_type_t type, List object_list, 
+			    time_t start, time_t end)
+{
+#ifdef HAVE_MYSQL
+	int rc = SLURM_SUCCESS;
+	int i=0;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	char *tmp = NULL;
+	char *my_usage_table = NULL;
+	char *query = NULL;
+	List usage_list = NULL;
+	char *id_str = NULL;
+	ListIterator itr = NULL, u_itr = NULL;
+	void *object = NULL;
+	acct_association_rec_t *assoc = NULL;
+	acct_wckey_rec_t *wckey = NULL;
+	acct_accounting_rec_t *accounting_rec = NULL;
+
+	/* Since the association table uses t3 for the id and
+	   the wckey table uses t1, we can't define it here */
+	char **usage_req_inx = NULL;
+	
+	enum {
+		USAGE_ID,
+		USAGE_START,
+		USAGE_ACPU,
+		USAGE_COUNT
+	};
+
+
+	if(!object_list) {
+		error("We need an object to set data for getting usage");
+		return SLURM_ERROR;
+	}
+
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
+		return SLURM_ERROR;
+
+	switch (type) {
+	case DBD_GET_ASSOC_USAGE:
+	{
+		char *temp_usage[] = {
+			"t3.id",
+			"t1.period_start",
+			"t1.alloc_cpu_secs"
+		};
+		usage_req_inx = temp_usage;
+
+		itr = list_iterator_create(object_list);
+		while((assoc = list_next(itr))) {
+			if(id_str)
+				xstrfmtcat(id_str, " || t3.id=%d", assoc->id);
+			else
+				xstrfmtcat(id_str, "t3.id=%d", assoc->id);
+		}
+		list_iterator_destroy(itr);
+
+		my_usage_table = assoc_day_table;
+		break;
+	}
+	case DBD_GET_WCKEY_USAGE:
+	{
+		char *temp_usage[] = {
+			"id",
+			"period_start",
+			"alloc_cpu_secs"
+		};
+		usage_req_inx = temp_usage;
+
+		itr = list_iterator_create(object_list);
+		while((wckey = list_next(itr))) {
+			if(id_str)
+				xstrfmtcat(id_str, " || id=%d", wckey->id);
+			else
+				xstrfmtcat(id_str, "id=%d", wckey->id);
+		}
+		list_iterator_destroy(itr);
+
+		my_usage_table = wckey_day_table;
+		break;
+	}
+	default:
+		error("Unknown usage type %d", type);
+		return SLURM_ERROR;
+		break;
+	}
+
+	if(_set_usage_information(&my_usage_table, type, &start, &end)
+	   != SLURM_SUCCESS) {
+		xfree(id_str);
+		return SLURM_ERROR;
+	}
+
+	xfree(tmp);
+	i=0;
+	xstrfmtcat(tmp, "%s", usage_req_inx[i]);
+	for(i=1; i<USAGE_COUNT; i++) {
+		xstrfmtcat(tmp, ", %s", usage_req_inx[i]);
+	}
+	switch (type) {
+	case DBD_GET_ASSOC_USAGE:
+		query = xstrdup_printf(
+			"select %s from %s as t1, %s as t2, %s as t3 "
+			"where (t1.period_start < %d && t1.period_start >= %d) "
+			"&& t1.id=t2.id && (%s) && "
+			"t2.lft between t3.lft and t3.rgt "
+			"order by t3.id, period_start;",
+			tmp, my_usage_table, assoc_table, assoc_table,
+			end, start, id_str);
+		break;
+	case DBD_GET_WCKEY_USAGE:
+		query = xstrdup_printf(
+			"select %s from %s "
+			"where (period_start < %d && period_start >= %d) "
+			"&& %s order by id, period_start;",
+			tmp, my_usage_table, end, start, id_str);
+		break;
+	default:
+		error("Unknown usage type %d", type);
+		xfree(id_str);
+		xfree(tmp);
+		return SLURM_ERROR;
+		break;
+	}
+	xfree(id_str);
+	xfree(tmp);
+
+	debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+	if(!(result = mysql_db_query_ret(
+		     mysql_conn->db_conn, query, 0))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+
+	usage_list = list_create(destroy_acct_accounting_rec);
+
+	while((row = mysql_fetch_row(result))) {
+		acct_accounting_rec_t *accounting_rec =
+			xmalloc(sizeof(acct_accounting_rec_t));
+		accounting_rec->id = atoi(row[USAGE_ID]);
+		accounting_rec->period_start = atoi(row[USAGE_START]);
+		accounting_rec->alloc_secs = atoll(row[USAGE_ACPU]);
+		list_append(usage_list, accounting_rec);
+	}
+	mysql_free_result(result);
+	
+	u_itr = list_iterator_create(usage_list);
+	itr = list_iterator_create(object_list);
+	while((object = list_next(itr))) {
+		int found = 0;
+		int id = 0;
+		List acct_list = NULL;
+
+		switch (type) {
+		case DBD_GET_ASSOC_USAGE:
+			assoc = (acct_association_rec_t *)object;
+			if(!assoc->accounting_list)
+				assoc->accounting_list = list_create(
+					destroy_acct_accounting_rec);
+			acct_list = assoc->accounting_list;
+			id = assoc->id;
+			break;
+		case DBD_GET_WCKEY_USAGE:
+			wckey = (acct_wckey_rec_t *)object;
+			if(!wckey->accounting_list)
+				wckey->accounting_list = list_create(
+					destroy_acct_accounting_rec);
+			acct_list = wckey->accounting_list;
+			id = wckey->id;
+			break;
+		default:
+			continue;
+			break;
+		}
+		
+		while((accounting_rec = list_next(u_itr))) {
+			if(id == accounting_rec->id) {
+				list_append(acct_list, accounting_rec);
+				list_remove(u_itr);
+				found = 1;
+			} else if(found) {
+				/* here we know the
+				   list is in id order,
+				   so once the next
+				   record no longer has
+				   the correct id we can
+				   stop; there is no
+				   reason to go through
+				   the rest of the list
+				   when we know it won't
+				   match this id */
+				break;
+			}
+		}
+		list_iterator_reset(u_itr);
+	}
+	list_iterator_destroy(itr);
+	list_iterator_destroy(u_itr);	
+	
+	if(list_count(usage_list))
+		error("we have %d records not added "
+		      "to the association list",
+		      list_count(usage_list));
+	list_destroy(usage_list);
+
+
+	return rc;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
 static mysql_db_info_t *_mysql_acct_create_db_info()
 {
 	mysql_db_info_t *db_info = xmalloc(sizeof(mysql_db_info_t));
@@ -2444,7 +2697,6 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "control_host", "tinytext not null default ''" },
 		{ "control_port", "mediumint not null default 0" },
 		{ "rpc_version", "mediumint not null default 0" },
-		{ "valid_qos", "blob" },
 		{ NULL, NULL}		
 	};
 
@@ -7250,14 +7502,12 @@ extern List acct_storage_p_get_clusters(mysql_conn_t *mysql_conn, uid_t uid,
 		"control_host",
 		"control_port",
 		"rpc_version",
-		"valid_qos",
 	};
 	enum {
 		CLUSTER_REQ_NAME,
 		CLUSTER_REQ_CH,
 		CLUSTER_REQ_CP,
 		CLUSTER_REQ_VERSION,
-		CLUSTER_REQ_VALID_QOS,
 		CLUSTER_REQ_COUNT
 	};
 
@@ -7344,13 +7594,6 @@ empty:
 		cluster->control_host = xstrdup(row[CLUSTER_REQ_CH]);
 		cluster->control_port = atoi(row[CLUSTER_REQ_CP]);
 		cluster->rpc_version = atoi(row[CLUSTER_REQ_VERSION]);
-		cluster->valid_qos_list = list_create(slurm_destroy_char);
-
-		if(row[CLUSTER_REQ_VALID_QOS] && row[CLUSTER_REQ_VALID_QOS][0])
-			slurm_addto_char_list(assoc->qos_list,
-					      row[CLUSTER_REQ_VALID_QOS]+1);
-		else 
-			list_append(cluster->valid_qos_list, xstrdup("all"));
 	}
 	mysql_free_result(result);
 
@@ -7394,8 +7637,8 @@ empty:
 	list_iterator_destroy(itr);
 	list_iterator_destroy(assoc_itr);
 	if(list_count(assoc_list))
-		info("I have %d left over associations", 
-		     list_count(assoc_list));
+		error("I have %d left over associations", 
+		      list_count(assoc_list));
 	list_destroy(assoc_list);
 
 	return cluster_list;
@@ -7659,13 +7902,6 @@ empty:
 		else
 			assoc->grp_cpu_mins = INFINITE;
 
-		/* get the usage if requested */
-		if(with_usage) {
-			acct_storage_p_get_usage(mysql_conn, uid, assoc,
-						 DBD_GET_ASSOC_USAGE,
-						 assoc_cond->usage_start,
-						 assoc_cond->usage_end);
-		}
 		parent_acct = row[ASSOC_REQ_ACCT];
 		if(!without_parent_info 
 		   && row[ASSOC_REQ_PARENT][0]) {
@@ -7890,6 +8126,12 @@ empty:
 		//info("parent id is %d", assoc->parent_id);
 		//log_assoc_rec(assoc);
 	}
+
+	if(with_usage && assoc_list) 
+		_get_usage_for_list(mysql_conn, DBD_GET_ASSOC_USAGE,
+				    assoc_list, assoc_cond->usage_start,
+				    assoc_cond->usage_end);
+	
 	mysql_free_result(result);
 
 	list_destroy(delta_qos_list);
@@ -8252,17 +8494,15 @@ empty:
 			wckey->name = xstrdup("");
 
 		wckey->cluster = xstrdup(row[WCKEY_REQ_CLUSTER]);
-
-		/* get the usage if requested */
-		if(with_usage) {
-			acct_storage_p_get_usage(mysql_conn, uid, wckey,
-						 DBD_GET_WCKEY_USAGE,
-						 wckey_cond->usage_start,
-						 wckey_cond->usage_end);
-		}		
 	}
 	mysql_free_result(result);
 
+	if(with_usage && wckey_list) 
+		_get_usage_for_list(mysql_conn, DBD_GET_WCKEY_USAGE,
+				    wckey_list, wckey_cond->usage_start,
+				    wckey_cond->usage_end);
+	
+
 	//END_TIMER2("get_wckeys");
 	return wckey_list;
 #else
@@ -8678,11 +8918,7 @@ extern int acct_storage_p_get_usage(mysql_conn_t *mysql_conn, uid_t uid,
 	List *my_list;
 	uint32_t id = NO_VAL;
 
-	char *usage_req_inx[] = {
-		"t1.id",
-		"t1.period_start",
-		"t1.alloc_cpu_secs"
-	};
+	char **usage_req_inx = NULL;
 	
 	enum {
 		USAGE_ID,
@@ -8693,17 +8929,35 @@ extern int acct_storage_p_get_usage(mysql_conn_t *mysql_conn, uid_t uid,
 
 	switch (type) {
 	case DBD_GET_ASSOC_USAGE:
+	{
+		char *temp_usage[] = {
+			"t3.id",
+			"t1.period_start",
+			"t1.alloc_cpu_secs"
+		};
+		usage_req_inx = temp_usage;
+
 		id = acct_assoc->id;
 		username = acct_assoc->user;
 		my_list = &acct_assoc->accounting_list;
 		my_usage_table = assoc_day_table;
 		break;
+	}
 	case DBD_GET_WCKEY_USAGE:
+	{
+		char *temp_usage[] = {
+			"id",
+			"period_start",
+			"alloc_cpu_secs"
+		};
+		usage_req_inx = temp_usage;
+
 		id = acct_wckey->id;
 		username = acct_wckey->user;
 		my_list = &acct_wckey->accounting_list;
 		my_usage_table = wckey_day_table;
 		break;
+	}
 	default:
 		error("Unknown usage type %d", type);
 		return SLURM_ERROR;
@@ -8762,7 +9016,7 @@ extern int acct_storage_p_get_usage(mysql_conn_t *mysql_conn, uid_t uid,
 				if(!acct_assoc->acct) {
 					debug("No account name given "
 					      "in association.");
-					goto bad_user;				
+					goto bad_user;			
 				}
 				
 				itr = list_iterator_create(user.coord_accts);
@@ -8802,13 +9056,13 @@ is_user:
 			"where (t1.period_start < %d && t1.period_start >= %d) "
 			"&& t1.id=t2.id && t3.id=%d && "
 			"t2.lft between t3.lft and t3.rgt "
-			"order by t1.id, period_start;",
+			"order by t3.id, period_start;",
 			tmp, my_usage_table, assoc_table, assoc_table,
 			end, start, id);
 		break;
 	case DBD_GET_WCKEY_USAGE:
 		query = xstrdup_printf(
-			"select %s from %s as t1 "
+			"select %s from %s "
 			"where (period_start < %d && period_start >= %d) "
 			"&& id=%d order by id, period_start;",
 			tmp, my_usage_table, end, start, id);
@@ -9285,12 +9539,16 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 	}
 	debug("%s has changed from %s cpus to %u", cluster, row[0], procs);   
 
+	/* reset all the entries for this cluster since the procs
+	   changed; some of the downed nodes may have gone away.
+	   Request them again with ACCOUNTING_FIRST_REG */
 	query = xstrdup_printf(
 		"update %s set period_end=%d where cluster=\"%s\" "
-		"and period_end=0 and node_name=''",
+		"and period_end=0",
 		event_table, event_time, cluster);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
+	first = 1;
 	if(rc != SLURM_SUCCESS)
 		goto end_it;
 add_it:
@@ -9418,6 +9676,7 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 	int reinit = 0;
 	time_t check_time = job_ptr->start_time;
 	uint32_t wckeyid = 0;
+	int no_cluster = 0;
 
 	if (!job_ptr->details || !job_ptr->details->submit_time) {
 		error("jobacct_storage_p_job_start: "
@@ -9446,6 +9705,12 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 		xfree(query);
 	} else
 		slurm_mutex_unlock(&rollup_lock);
+	
+	if(!cluster_name && job_ptr->assoc_id) {
+		no_cluster = 1;
+		cluster_name = _get_cluster_from_associd(mysql_conn,
+							 job_ptr->assoc_id);
+	}
 
 
 	priority = (job_ptr->priority == NO_VAL) ?
@@ -9458,12 +9723,20 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 		/* then grep for " since that is the delimiter for
 		   the wckey */
 		if((temp = strchr(jname, '\"'))) {
-			/* if we have a wckey set the " to NULL to
-			 * end the jname */
-			temp[0] = '\0';
-			/* increment and copy the remainder */
-			temp++;
-			wckey = xstrdup(temp);
+			if(strrchr(jname, '\"') != temp) {
+				error("job %u has quotes in it's name '%s', "
+				      "no way to get correct wckey", 
+				      job_ptr->job_id, jname);
+				xfree(jname);
+				jname = _fix_double_quotes(job_ptr->name);
+			} else {
+				/* if we have a wckey set the " to NULL to
+				 * end the jname */
+				temp[0] = '\0';
+				/* increment and copy the remainder */
+				temp++;
+				wckey = xstrdup(temp);
+			}
 		}
 	}
 
@@ -9498,6 +9771,7 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 		wckeyid = _get_wckeyid(mysql_conn, &wckey,
 				       job_ptr->user_id, cluster_name,
 				       job_ptr->assoc_id);
+	
 			
 	/* We need to put a 0 for 'end' incase of funky job state
 	 * files from a hot start of the controllers we call
@@ -9615,7 +9889,8 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 	xfree(wckey);
 
 	xfree(query);
-
+	if(no_cluster)
+		xfree(cluster_name);
 	return rc;
 #else
 	return SLURM_ERROR;
@@ -9631,6 +9906,7 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 #ifdef HAVE_MYSQL
 	char *query = NULL, *nodes = NULL;
 	int rc=SLURM_SUCCESS;
+	time_t start_time = job_ptr->start_time;
 
 	if (!job_ptr->db_index 
 	    && (!job_ptr->details || !job_ptr->details->submit_time)) {
@@ -9649,7 +9925,8 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 	if (job_ptr->end_time == 0) {
 		debug("mysql_jobacct: job %u never started", job_ptr->job_id);
 		return SLURM_SUCCESS;
-	}	
+	} else if(start_time > job_ptr->end_time)
+		start_time = 0;	
 	
 	slurm_mutex_lock(&rollup_lock);
 	if(job_ptr->end_time < global_last_rollup) {
@@ -9692,7 +9969,7 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 	query = xstrdup_printf("update %s set start=%d, end=%d, state=%d, "
 			       "nodelist=\"%s\", comp_code=%d, "
 			       "kill_requid=%d where id=%d",
-			       job_table, (int)job_ptr->start_time,
+			       job_table, (int)start_time,
 			       (int)job_ptr->end_time, 
 			       job_ptr->job_state & (~JOB_COMPLETING),
 			       nodes, job_ptr->exit_code,
diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
index 4e629210f..34e4b04a7 100644
--- a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
+++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
@@ -230,7 +230,7 @@ extern int setup_job_cond_limits(acct_job_cond_t *job_cond, char **extra)
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "t1.gid=", object);
+			xstrfmtcat(*extra, "t1.gid='%s'", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -283,6 +283,13 @@ extern int setup_job_cond_limits(acct_job_cond_t *job_cond, char **extra)
 			   "(t1.eligible < %d "
 			   "&& (t1.end >= %d || t1.end = 0)))",
 			   job_cond->usage_end, job_cond->usage_start);
+	} else if(job_cond->usage_end) {
+		if(*extra)
+			xstrcat(*extra, " && (");
+		else
+			xstrcat(*extra, " where (");
+		xstrfmtcat(*extra, 
+			   "(t1.eligible < %d))", job_cond->usage_end);
 	}
 
 	if(job_cond->state_list && list_count(job_cond->state_list)) {
@@ -600,7 +607,10 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 	   easy to look for duplicates 
 	*/
 	if(job_cond && !job_cond->duplicates) 
-		xstrcat(query, " order by jobid, submit desc");
+		xstrcat(query, " order by t1.cluster, jobid, submit desc");
+	else
+		xstrcat(query, " order by t1.cluster, submit desc");
+		
 
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	if(!(result = mysql_db_query_ret(
@@ -735,6 +745,9 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 			job->elapsed -= job->suspended;
 		}
 
+		if((int)job->elapsed < 0)
+			job->elapsed = 0;
+
 		job->jobid = curr_id;
 		job->jobname = xstrdup(row[JOB_REQ_NAME]);
 		job->gid = atoi(row[JOB_REQ_GID]);
@@ -851,6 +864,10 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 				step->elapsed = step->end - step->start;
 			}
 			step->elapsed -= step->suspended;
+
+			if((int)step->elapsed < 0)
+				step->elapsed = 0;
+
 			step->user_cpu_sec = atoi(step_row[STEP_REQ_USER_SEC]);
 			step->user_cpu_usec =
 				atoi(step_row[STEP_REQ_USER_USEC]);
@@ -915,6 +932,8 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 					job->track_steps = 1;
 			}
                }
+		/* need to reset here to make the above test valid */
+		step = NULL;
 	}
 	mysql_free_result(result);
 
@@ -1246,10 +1265,11 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 			if (rc)
 				(void) unlink(new_file);
 			else {			/* file shuffle */
+				int ign;	/* avoid warning */
 				(void) unlink(old_file);
-				(void) link(reg_file, old_file);
+				ign =  link(reg_file, old_file);
 				(void) unlink(reg_file);
-				(void) link(new_file, reg_file);
+				ign =   link(new_file, reg_file);
 				(void) unlink(new_file);
 			}
 			xfree(old_file);
@@ -1432,10 +1452,11 @@ exit_steps:
 			if (rc)
 				(void) unlink(new_file);
 			else {			/* file shuffle */
+				int ign;	/* avoid warning */
 				(void) unlink(old_file);
-				(void) link(reg_file, old_file);
+				ign =  link(reg_file, old_file);
 				(void) unlink(reg_file);
-				(void) link(new_file, reg_file);
+				ign =  link(new_file, reg_file);
 				(void) unlink(new_file);
 			}
 			xfree(old_file);
@@ -1520,7 +1541,7 @@ extern int mysql_jobacct_process_archive_load(mysql_conn_t *mysql_conn,
 	}
 	
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, data);
-	error_code = mysql_db_query(mysql_conn->db_conn, data);
+	error_code = mysql_db_query_check_after(mysql_conn->db_conn, data);
 	xfree(data);
 	if(error_code != SLURM_SUCCESS) {
 		error("Couldn't load old data");
diff --git a/src/plugins/accounting_storage/mysql/mysql_rollup.c b/src/plugins/accounting_storage/mysql/mysql_rollup.c
index 2fe18d362..d338bd3c0 100644
--- a/src/plugins/accounting_storage/mysql/mysql_rollup.c
+++ b/src/plugins/accounting_storage/mysql/mysql_rollup.c
@@ -304,7 +304,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				       "|| end = 0)) "
 				       "order by associd, eligible",
 				       job_str, job_table, 
-				       curr_end, curr_start, curr_start);
+				       curr_end, curr_start);
 
 		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		if(!(result = mysql_db_query_ret(
diff --git a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
index 86c22abed..4d4ab0aa2 100644
--- a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
+++ b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
@@ -1164,7 +1164,7 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 {
 #ifdef HAVE_PGSQL
 	int	rc=SLURM_SUCCESS;
-	char	*jname, *nodes;
+	char	*jname = NULL, *nodes;
 	long	priority;
 	int track_steps = 0;
 	char *block_id = NULL;
diff --git a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
index f8d9a3d41..2f5d422dc 100644
--- a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
+++ b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
@@ -700,6 +700,8 @@ no_cond:
 					job->track_steps = 1;
 			}
                }
+		/* need to reset here to make the above test valid */
+		step = NULL;
 	}
 	PQclear(result);
 	
diff --git a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
index 841c97119..86093f83b 100644
--- a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
+++ b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
@@ -1279,6 +1279,7 @@ extern int clusteracct_storage_p_cluster_procs(void *db_conn,
 {
 	slurmdbd_msg_t msg;
 	dbd_cluster_procs_msg_t req;
+	int rc = SLURM_ERROR;
 
 	debug2("Sending info for cluster %s", cluster);
 	req.cluster_name = cluster;
@@ -1287,10 +1288,9 @@ extern int clusteracct_storage_p_cluster_procs(void *db_conn,
 	msg.msg_type     = DBD_CLUSTER_PROCS;
 	msg.data         = &req;
 
-	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
-		return SLURM_ERROR;
+	slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION, &msg, &rc);
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 extern int clusteracct_storage_p_register_ctld(void *db_conn,
diff --git a/src/plugins/proctrack/rms/proctrack_rms.c b/src/plugins/proctrack/rms/proctrack_rms.c
index d6f6bd747..93204494f 100644
--- a/src/plugins/proctrack/rms/proctrack_rms.c
+++ b/src/plugins/proctrack/rms/proctrack_rms.c
@@ -93,7 +93,7 @@ extern int slurm_container_create (slurmd_job_t *job)
 	/*
 	 * Return a handle to an existing prgid or create a new one
 	 */
-	if (rms_getprgid (job->jmgr_pid, &prgid) < 0) {
+	if (rms_getprgid ((int) job->jmgr_pid, &prgid) < 0) {
 		int fd = _prg_destructor_fork();
 		/* Use slurmd job-step manager's pid as a unique identifier */
 		prgid = job->jmgr_pid;
diff --git a/src/plugins/sched/backfill/backfill_wrapper.c b/src/plugins/sched/backfill/backfill_wrapper.c
index 013e47351..b21ff971e 100644
--- a/src/plugins/sched/backfill/backfill_wrapper.c
+++ b/src/plugins/sched/backfill/backfill_wrapper.c
@@ -75,7 +75,7 @@ int init( void )
 	}
 
 	slurm_attr_init( &attr );
-	pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_DETACHED );
+	/* since we do a join on this later we don't make it detached */
 	if (pthread_create( &backfill_thread, &attr, backfill_agent, NULL))
 		error("Unable to start backfill thread: %m");
 	pthread_mutex_unlock( &thread_flag_mutex );
@@ -93,6 +93,7 @@ void fini( void )
 	if ( backfill_thread ) {
 		verbose( "Backfill scheduler plugin shutting down" );
 		stop_backfill_agent();
+		pthread_join(backfill_thread, NULL);
 		backfill_thread = 0;
 	}
 	pthread_mutex_unlock( &thread_flag_mutex );
diff --git a/src/plugins/sched/wiki/job_modify.c b/src/plugins/sched/wiki/job_modify.c
index 7cad50ead..263e418bd 100644
--- a/src/plugins/sched/wiki/job_modify.c
+++ b/src/plugins/sched/wiki/job_modify.c
@@ -40,6 +40,7 @@
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/slurmctld.h"
+#include "src/common/slurm_accounting_storage.h"
 
 extern void	null_term(char *str)
 {
@@ -60,6 +61,7 @@ static int	_job_modify(uint32_t jobid, char *bank_ptr,
 			uint32_t new_time_limit)
 {
 	struct job_record *job_ptr;
+	bool update_accounting = false;
 
 	job_ptr = find_job_record(jobid);
 	if (job_ptr == NULL) {
@@ -96,9 +98,12 @@ static int	_job_modify(uint32_t jobid, char *bank_ptr,
 		last_job_update = time(NULL);
 	}
 
-	if (bank_ptr &&
-	    (update_job_account("wiki", job_ptr, bank_ptr) != SLURM_SUCCESS)) {
-		return EINVAL;
+	if (bank_ptr) {
+		if(update_job_account("wiki", job_ptr, bank_ptr)
+		   != SLURM_SUCCESS)
+			return EINVAL;
+		else
+			update_accounting = true;
 	}
 
 	if (new_hostlist) {
@@ -147,11 +152,14 @@ static int	_job_modify(uint32_t jobid, char *bank_ptr,
 		}
 
 host_fini:	if (rc) {
-			info("wiki: change job %u invalid hostlist %s", jobid, new_hostlist);
+			info("wiki: change job %u invalid hostlist %s",
+			     jobid, new_hostlist);
 			xfree(job_ptr->details->req_nodes);
 			return EINVAL;
 		} else {
-			info("wiki: change job %u hostlist %s", jobid, new_hostlist);
+			info("wiki: change job %u hostlist %s",
+			     jobid, new_hostlist);
+			update_accounting = true;
 		}
 	}
 
@@ -169,6 +177,7 @@ host_fini:	if (rc) {
 		job_ptr->partition = xstrdup(part_name_ptr);
 		job_ptr->part_ptr = part_ptr;
 		last_job_update = time(NULL);
+		update_accounting = true;
 	}
 	if (new_node_cnt) {
 		if (IS_JOB_PENDING(job_ptr) && job_ptr->details) {
@@ -179,6 +188,7 @@ host_fini:	if (rc) {
 			info("wiki: change job %u min_nodes to %u",
 				jobid, new_node_cnt);
 			last_job_update = time(NULL);
+			update_accounting = true;
 		} else {
 			error("wiki: MODIFYJOB node count of non-pending "
 				"job %u", jobid);
@@ -186,6 +196,14 @@ host_fini:	if (rc) {
 		}
 	}
 
+	if(update_accounting) {
+		if (job_ptr->details && job_ptr->details->begin_time) {
+			/* Update job record in accounting to reflect changes */
+			jobacct_storage_g_job_start(
+				acct_db_conn, slurmctld_cluster_name, job_ptr);
+		}
+	}
+
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/plugins/sched/wiki/msg.c b/src/plugins/sched/wiki/msg.c
index e7f0312eb..4b7ebfd6c 100644
--- a/src/plugins/sched/wiki/msg.c
+++ b/src/plugins/sched/wiki/msg.c
@@ -474,7 +474,7 @@ static char *	_recv_msg(slurm_fd new_fd)
 		return NULL;
 	}
 
-	debug("wiki msg recv:%s", buf);
+	debug2("wiki msg recv:%s", buf);
 	return buf;
 }
 
@@ -488,7 +488,7 @@ static size_t	_send_msg(slurm_fd new_fd, char *buf, size_t size)
 	char header[10];
 	size_t data_sent;
 
-	debug("wiki msg send:%s", buf);
+	debug2("wiki msg send:%s", buf);
 
 	(void) sprintf(header, "%08lu\n", (unsigned long) size);
 	if (_write_bytes((int) new_fd, header, 9) != 9) {
diff --git a/src/plugins/sched/wiki/start_job.c b/src/plugins/sched/wiki/start_job.c
index 22007c628..7be3a0cbc 100644
--- a/src/plugins/sched/wiki/start_job.c
+++ b/src/plugins/sched/wiki/start_job.c
@@ -185,6 +185,20 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 			goto fini;
 		}
 
+		if (!bit_super_set(new_bitmap, avail_node_bitmap)) {
+			/* Selected node is UP and not responding
+			 * or it just went DOWN */
+			*err_code = -700;
+			*err_msg = "TASKLIST includes non-responsive node";
+			error("wiki: Attempt to use non-responsive nodes for "
+				"job %u, %s",
+				jobid, hostlist);
+			xfree(new_node_list);
+			bit_free(new_bitmap);
+			rc = -1;
+			goto fini;
+		}
+
 		/* User excluded node list incompatable with Wiki
 		 * Exclude all nodes not explicitly requested */
 		FREE_NULL_BITMAP(job_ptr->details->exc_node_bitmap);
diff --git a/src/plugins/sched/wiki2/get_jobs.c b/src/plugins/sched/wiki2/get_jobs.c
index e5a953a8c..96226ae70 100644
--- a/src/plugins/sched/wiki2/get_jobs.c
+++ b/src/plugins/sched/wiki2/get_jobs.c
@@ -439,6 +439,13 @@ static void	_get_job_comment(struct job_record *job_ptr,
 		field_sep = "?";
 	}
 
+	/* JOBFLAGS:RESTARTABLE */
+	if (job_ptr->details && job_ptr->details->requeue) {
+		size += snprintf((buffer + size), (buf_size - size),
+			"%sJOBFLAGS:RESTARTABLE", field_sep);
+		field_sep = "?";
+	}
+
 	/* COMMENT SET BY MOAB */
 	if (job_ptr->comment && job_ptr->comment[0]) {
 		size += snprintf((buffer + size), (buf_size - size),
@@ -487,7 +494,7 @@ static uint32_t	_get_job_max_nodes(struct job_record *job_ptr)
 		return max_nodes;	/* should never reach here */
 
 	if (job_ptr->details->max_nodes) {
-			max_nodes = job_ptr->details->max_nodes;
+		max_nodes = job_ptr->details->max_nodes;
 		if (job_ptr->part_ptr->max_nodes != INFINITE) {
 			max_nodes = MIN(max_nodes, 
 					job_ptr->part_ptr->max_nodes);
diff --git a/src/plugins/sched/wiki2/job_modify.c b/src/plugins/sched/wiki2/job_modify.c
index 8b54ef333..c0c7673ab 100644
--- a/src/plugins/sched/wiki2/job_modify.c
+++ b/src/plugins/sched/wiki2/job_modify.c
@@ -40,6 +40,7 @@
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/slurmctld.h"
+#include "src/common/slurm_accounting_storage.h"
 
 /* Given a string, replace the first space found with '\0' */
 extern void	null_term(char *str)
@@ -63,6 +64,7 @@ static int	_job_modify(uint32_t jobid, char *bank_ptr,
 {
 	struct job_record *job_ptr;
 	time_t now = time(NULL);
+	bool update_accounting = false;
 
 	job_ptr = find_job_record(jobid);
 	if (job_ptr == NULL) {
@@ -127,6 +129,7 @@ static int	_job_modify(uint32_t jobid, char *bank_ptr,
 				jobid, begin_time);
 			job_ptr->details->begin_time = begin_time;
 			last_job_update = now;
+			update_accounting = true;
 		} else {
 			error("wiki: MODIFYJOB begin_time of non-pending "
 				"job %u", jobid);
@@ -140,6 +143,7 @@ static int	_job_modify(uint32_t jobid, char *bank_ptr,
 			xfree(job_ptr->name);
 			job_ptr->name = xstrdup(name_ptr);
 			last_job_update = now;
+			update_accounting = true;
 		} else {
 			error("wiki: MODIFYJOB name of non-pending job %u",
 			      jobid);
@@ -200,6 +204,7 @@ host_fini:	if (rc) {
 		} else {
 			info("wiki: change job %u hostlist %s", 
 				jobid, new_hostlist);
+			update_accounting = true;
 		}
 	}
 
@@ -217,6 +222,7 @@ host_fini:	if (rc) {
 		job_ptr->partition = xstrdup(part_name_ptr);
 		job_ptr->part_ptr = part_ptr;
 		last_job_update = now;
+		update_accounting = true;
 	}
 
 	if (new_node_cnt) {
@@ -228,6 +234,7 @@ host_fini:	if (rc) {
 			info("wiki: change job %u min_nodes to %u",
 				jobid, new_node_cnt);
 			last_job_update = now;
+			update_accounting = true;
 		} else {
 			error("wiki: MODIFYJOB node count of non-pending "
 				"job %u", jobid);
@@ -235,6 +242,14 @@ host_fini:	if (rc) {
 		}
 	}
 
+	if(update_accounting) {
+		if (job_ptr->details && job_ptr->details->begin_time) {
+			/* Update job record in accounting to reflect changes */
+			jobacct_storage_g_job_start(
+				acct_db_conn, slurmctld_cluster_name, job_ptr);
+		}
+	}
+
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/plugins/sched/wiki2/job_requeue.c b/src/plugins/sched/wiki2/job_requeue.c
index 444b810ee..a756dcf00 100644
--- a/src/plugins/sched/wiki2/job_requeue.c
+++ b/src/plugins/sched/wiki2/job_requeue.c
@@ -44,6 +44,7 @@ extern int	job_requeue_wiki(char *cmd_ptr, int *err_code, char **err_msg)
 {
 	char *arg_ptr, *tmp_char;
 	uint32_t jobid;
+	struct job_record *job_ptr;
 	static char reply_msg[128];
 	int slurm_rc;
 	/* Write lock on job and node info */
@@ -67,24 +68,23 @@ extern int	job_requeue_wiki(char *cmd_ptr, int *err_code, char **err_msg)
 
 	lock_slurmctld(job_write_lock);
 	slurm_rc = job_requeue(0, jobid, -1);
-	if (slurm_rc == SLURM_SUCCESS) {
-		/* We need to clear the required node list here.
-		 * If the job was submitted with srun and a 
-		 * required node list, it gets lost here. */
-		struct job_record *job_ptr;
-		job_ptr = find_job_record(jobid);
-		if (job_ptr && job_ptr->details) {
-			xfree(job_ptr->details->req_nodes);
-			FREE_NULL_BITMAP(job_ptr->details->
-					 req_node_bitmap);
-		}
-		info("wiki: requeued job %u", jobid);
-	} else {
+	if (slurm_rc != SLURM_SUCCESS) {
+		unlock_slurmctld(job_write_lock);
 		*err_code = -700;
 		*err_msg = slurm_strerror(slurm_rc);
 		error("wiki: Failed to requeue job %u (%m)", jobid);
 		return -1;
 	}
+
+	/* We need to clear the required node list here.
+	 * If the job was submitted with srun and a 
+	 * required node list, it gets lost here. */
+	job_ptr = find_job_record(jobid);
+	if (job_ptr && job_ptr->details) {
+		xfree(job_ptr->details->req_nodes);
+		FREE_NULL_BITMAP(job_ptr->details->req_node_bitmap);
+	}
+	info("wiki: requeued job %u", jobid);
 	unlock_slurmctld(job_write_lock);
 	snprintf(reply_msg, sizeof(reply_msg),
 		"job %u requeued successfully", jobid);
diff --git a/src/plugins/sched/wiki2/job_will_run.c b/src/plugins/sched/wiki2/job_will_run.c
index 3ecca4dfd..27e7d1e45 100644
--- a/src/plugins/sched/wiki2/job_will_run.c
+++ b/src/plugins/sched/wiki2/job_will_run.c
@@ -234,7 +234,8 @@ static char *	_will_run_test(uint32_t *jobid, time_t *start_time,
 		}
 		if (job_ptr->details->exc_node_bitmap) {
 			bit_not(job_ptr->details->exc_node_bitmap);
-			bit_and(avail_bitmap, job_ptr->details->exc_node_bitmap);
+			bit_and(avail_bitmap, 
+				job_ptr->details->exc_node_bitmap);
 			bit_not(job_ptr->details->exc_node_bitmap);
 		}
 		if ((job_ptr->details->req_node_bitmap) &&
diff --git a/src/plugins/sched/wiki2/msg.c b/src/plugins/sched/wiki2/msg.c
index 968f92370..466868415 100644
--- a/src/plugins/sched/wiki2/msg.c
+++ b/src/plugins/sched/wiki2/msg.c
@@ -43,6 +43,11 @@
 
 #define _DEBUG 0
 
+/* When a remote socket closes on AIX, we have seen poll() return EAGAIN
+ * indefinitely for a pending write request. Rather than locking up 
+ * slurmctld's wiki interface, abort after MAX_RETRIES poll() failures. */
+#define MAX_RETRIES 10
+
 static bool thread_running = false;
 static bool thread_shutdown = false;
 static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -433,8 +438,8 @@ static size_t	_read_bytes(int fd, char *buf, size_t size)
 		rc = poll(&ufds, 1, 10000);	/* 10 sec timeout */
 		if (rc == 0)		/* timed out */
 			break;
-		if ((rc == -1) 		/* some error */
-		&&  ((errno== EINTR) || (errno == EAGAIN)))
+		if ((rc == -1) &&	/* some error */
+		    ((errno== EINTR) || (errno == EAGAIN)))
 			continue;
 		if ((ufds.revents & POLLIN) == 0) /* some poll error */
 			break;
@@ -455,7 +460,7 @@ static size_t	_write_bytes(int fd, char *buf, size_t size)
 	size_t bytes_remaining, bytes_written;
 	char *ptr;
 	struct pollfd ufds;
-	int rc;
+	int rc, retry_cnt = 0;
 
 	bytes_remaining = size;
 	size = 0;
@@ -466,9 +471,15 @@ static size_t	_write_bytes(int fd, char *buf, size_t size)
 		rc = poll(&ufds, 1, 10000);	/* 10 sec timeout */
 		if (rc == 0)		/* timed out */
 			break;
-		if ((rc == -1)  	/* some error */
-		&&  ((errno== EINTR) || (errno == EAGAIN)))
+		if ((rc == -1) &&	/* some error */
+		    ((errno== EINTR) || (errno == EAGAIN))) {
+			if ((retry_cnt++) >= MAX_RETRIES) {
+				error("wiki: repeated poll errors for "
+				      "write: %m");
+				break;
+			}
 			continue;
+		}
 		if ((ufds.revents & POLLOUT) == 0) /* some poll error */
 			break;
 
@@ -518,7 +529,7 @@ static char *	_recv_msg(slurm_fd new_fd)
 		return NULL;
 	}
 
-	debug("wiki msg recv:%s", buf);
+	debug2("wiki msg recv:%s", buf);
 	return buf;
 }
 
@@ -532,7 +543,7 @@ static size_t	_send_msg(slurm_fd new_fd, char *buf, size_t size)
 	char header[10];
 	size_t data_sent;
 
-	debug("wiki msg send:%s", buf);
+	debug2("wiki msg send:%s", buf);
 
 	(void) sprintf(header, "%08lu\n", (unsigned long) size);
 	if (_write_bytes((int) new_fd, header, 9) != 9) {
diff --git a/src/plugins/sched/wiki2/start_job.c b/src/plugins/sched/wiki2/start_job.c
index bc9f296c6..3dfe72a84 100644
--- a/src/plugins/sched/wiki2/start_job.c
+++ b/src/plugins/sched/wiki2/start_job.c
@@ -239,6 +239,20 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 			goto fini;
 		}
 
+		if (!bit_super_set(new_bitmap, avail_node_bitmap)) {
+			/* Selected node is UP and not responding
+			 * or it just went DOWN */
+			*err_code = -700;
+			*err_msg = "TASKLIST includes non-responsive node";
+			error("wiki: Attempt to use non-responsive nodes for "
+				"job %u, %s",
+				jobid, hostlist);
+			xfree(new_node_list);
+			bit_free(new_bitmap);
+			rc = -1;
+			goto fini;
+		}
+
 		/* User excluded node list incompatable with Wiki
 		 * Exclude all nodes not explicitly requested */
 		FREE_NULL_BITMAP(job_ptr->details->exc_node_bitmap);
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c
index f317c9882..546771f90 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.c
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  block_allocator.c - Assorted functions for layout of bluegene blocks, 
  *	 wiring, mapping for smap, etc.
- *  $Id: block_allocator.c 16088 2008-12-29 21:56:17Z jette $
+ *  $Id: block_allocator.c 17225 2009-04-10 19:25:52Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -62,16 +62,20 @@ List path = NULL;
 List best_path = NULL;
 int best_count;
 int color_count = 0;
-bool *passthrough = NULL;
+uint16_t *deny_pass = NULL;
 
 /* extern Global */
+my_bluegene_t *bg = NULL;
+uint16_t ba_deny_pass = 0;
 List bp_map_list = NULL;
 char letters[62];
 char colors[6];
 #ifdef HAVE_3D
 int DIM_SIZE[BA_SYSTEM_DIMENSIONS] = {0,0,0};
+int REAL_DIM_SIZE[BA_SYSTEM_DIMENSIONS] = {0,0,0};
 #else
 int DIM_SIZE[BA_SYSTEM_DIMENSIONS] = {0};
+int REAL_DIM_SIZE[BA_SYSTEM_DIMENSIONS] = {0};
 #endif
 
 s_p_options_t bg_conf_file_options[] = {
@@ -88,6 +92,7 @@ s_p_options_t bg_conf_file_options[] = {
 	{"AltCnloadImage", S_P_ARRAY, parse_image, NULL},
 	{"AltIoloadImage", S_P_ARRAY, parse_image, NULL},
 #endif
+	{"DenyPassthrough", S_P_STRING},
 	{"LayoutMode", S_P_STRING},
 	{"MloaderImage", S_P_STRING},
 	{"BridgeAPILogFile", S_P_STRING},
@@ -206,6 +211,7 @@ static int _set_one_dim(int *start, int *end, int *coord);
 /* */
 static void _destroy_geo(void *object);
 
+
 extern char *bg_block_state_string(rm_partition_state_t state)
 {
 	static char tmp[16];
@@ -238,6 +244,28 @@ extern char *bg_block_state_string(rm_partition_state_t state)
 	return tmp;
 }
 
+extern char *ba_passthroughs_string(uint16_t passthrough)
+{
+	char *pass = NULL;
+	if(passthrough & PASS_FOUND_X)
+		xstrcat(pass, "X");
+	if(passthrough & PASS_FOUND_Y) {
+		if(pass)
+			xstrcat(pass, ",Y");
+		else
+			xstrcat(pass, "Y");
+	}
+	if(passthrough & PASS_FOUND_Z) {
+		if(pass)
+			xstrcat(pass, ",Z");
+		else
+			xstrcat(pass, "Z");
+	}
+	
+	return pass;
+}
+
+
 extern int parse_blockreq(void **dest, slurm_parser_enum_t type,
 			  const char *key, const char *value, 
 			  const char *line, char **leftover)
@@ -485,8 +513,11 @@ extern int new_ba_request(ba_request_t* ba_request)
 	geo[X] = ba_request->geometry[X];
 	geo[Y] = ba_request->geometry[Y];
 	geo[Z] = ba_request->geometry[Z];
-	passthrough = &ba_request->passthrough;
-
+	if(ba_request->deny_pass == (uint16_t)NO_VAL) 
+		ba_request->deny_pass = ba_deny_pass;
+	
+	deny_pass = &ba_request->deny_pass;
+	
 	if(geo[X] != (uint16_t)NO_VAL) { 
 		for (i=0; i<BA_SYSTEM_DIMENSIONS; i++){
 			if ((geo[i] < 1) 
@@ -566,7 +597,7 @@ extern int new_ba_request(ba_request_t* ba_request)
 				  ba_request->elongate_geos,
 				  ba_request->rotate);
 		}	
-	startagain:		
+//	startagain:		
 		picked=0;
 		for(i=0;i<8;i++)
 			checked[i]=0;
@@ -617,9 +648,21 @@ extern int new_ba_request(ba_request_t* ba_request)
 							break;
 					}		
 				}				
+				/* This size can not be made into a
+				   block return.  If you want to try
+				   until we find the next largest block
+				   uncomment the code below and the goto
+				   above. If a user specifies a max
+				   node count the job will never
+				   run.  
+				*/
 				if(i2==1) {
-					ba_request->size +=1;
-					goto startagain;
+					error("Can't make a block of "
+					      "%d into a cube.",
+					      ba_request->size);
+					return 0;
+/* 					ba_request->size +=1; */
+/* 					goto startagain; */
 				}
 						
 			} else {
@@ -856,7 +899,6 @@ extern void ba_init(node_info_msg_t *node_info_ptr)
 	int end[BA_SYSTEM_DIMENSIONS];
 	
 #ifdef HAVE_BG_FILES
-	my_bluegene_t *bg = NULL;
 	rm_size3D_t bp_size;
 	int rc = 0;
 #endif /* HAVE_BG_FILES */
@@ -943,6 +985,10 @@ extern void ba_init(node_info_msg_t *node_info_ptr)
 		DIM_SIZE[X]++;
 		DIM_SIZE[Y]++;
 		DIM_SIZE[Z]++;
+		/* this will probably be reset below */
+		REAL_DIM_SIZE[X] = DIM_SIZE[X];
+		REAL_DIM_SIZE[Y] = DIM_SIZE[Y];
+		REAL_DIM_SIZE[Z] = DIM_SIZE[Z];
 #else
 		DIM_SIZE[X] = node_info_ptr->record_count;
 #endif
@@ -951,28 +997,6 @@ extern void ba_init(node_info_msg_t *node_info_ptr)
 #ifdef HAVE_3D
 node_info_error:
 
-#ifdef HAVE_BG_FILES
-	if (have_db2
-	    && ((DIM_SIZE[X]==0) || (DIM_SIZE[Y]==0) || (DIM_SIZE[Z]==0))) {
-		if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
-			error("bridge_get_BG(): %d", rc);
-			return;
-		}
-		
-		if ((bg != NULL)
-		&&  ((rc = bridge_get_data(bg, RM_Msize, &bp_size)) 
-		     == STATUS_OK)) {
-			DIM_SIZE[X]=bp_size.X;
-			DIM_SIZE[Y]=bp_size.Y;
-			DIM_SIZE[Z]=bp_size.Z;
-		} else {
-			error("bridge_get_data(RM_Msize): %d", rc);	
-		}
-		if ((rc = bridge_free_bg(bg)) != STATUS_OK)
-			error("bridge_free_BG(): %d", rc);
-	}
-#endif
-
 	if ((DIM_SIZE[X]==0) || (DIM_SIZE[Y]==0) || (DIM_SIZE[Z]==0)) {
 		debug("Setting dimensions from slurm.conf file");
 		count = slurm_conf_nodename_array(&ptr_array);
@@ -1025,9 +1049,58 @@ node_info_error:
 		DIM_SIZE[X]++;
 		DIM_SIZE[Y]++;
 		DIM_SIZE[Z]++;
+		/* this will probably be reset below */
+		REAL_DIM_SIZE[X] = DIM_SIZE[X];
+		REAL_DIM_SIZE[Y] = DIM_SIZE[Y];
+		REAL_DIM_SIZE[Z] = DIM_SIZE[Z];
 	}
-	debug("DIM_SIZE = %c%c%c\n", alpha_num[DIM_SIZE[X]],
-	      alpha_num[DIM_SIZE[Y]], alpha_num[DIM_SIZE[Z]]);
+#ifdef HAVE_BG_FILES
+	/* sanity check.  We can only request part of the system, but
+	   we don't want to allow more than we have. */
+	if (have_db2) {
+		verbose("Attempting to contact MMCS");
+		if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
+			fatal("bridge_get_BG() failed.  This usually means "
+			      "there is something wrong with the database.  "
+			      "You might want to run slurmctld in daemon "
+			      "mode (-D) to see what the real error from "
+			      "the api was.  The return code was %d", rc);
+			return;
+		}
+		
+		if ((bg != NULL)
+		&&  ((rc = bridge_get_data(bg, RM_Msize, &bp_size)) 
+		     == STATUS_OK)) {
+			verbose("BlueGene configured with "
+				"%d x %d x %d base blocks", 
+				bp_size.X, bp_size.Y, bp_size.Z);
+			REAL_DIM_SIZE[X] = bp_size.X;
+			REAL_DIM_SIZE[Y] = bp_size.Y;
+			REAL_DIM_SIZE[Z] = bp_size.Z;
+			if((DIM_SIZE[X] > bp_size.X)
+			   || (DIM_SIZE[Y] > bp_size.Y)
+			   || (DIM_SIZE[Z] > bp_size.Z)) {
+				fatal("You requested a %c%c%c system, "
+				      "but we only have a system of %c%c%c.  "
+				      "Change your slurm.conf.",
+				      alpha_num[DIM_SIZE[X]],
+				      alpha_num[DIM_SIZE[Y]],
+				      alpha_num[DIM_SIZE[Z]],
+				      alpha_num[bp_size.X],
+				      alpha_num[bp_size.Y],
+				      alpha_num[bp_size.Z]);
+			}
+		} else {
+			error("bridge_get_data(RM_Msize): %d", rc);	
+		}
+	}
+#endif
+
+
+	debug("We are using %c x %c x %c of the system.", 
+	      alpha_num[DIM_SIZE[X]],
+	      alpha_num[DIM_SIZE[Y]],
+	      alpha_num[DIM_SIZE[Z]]);
 	
 #else 
 	if (DIM_SIZE[X]==0) {
@@ -1120,6 +1193,9 @@ extern void ba_fini()
 		best_path = NULL;
 	}
 #ifdef HAVE_BG_FILES
+	if(bg)
+		bridge_free_bg(bg);
+
 	if (bp_map_list) {
 		list_destroy(bp_map_list);
 		bp_map_list = NULL;
@@ -1529,13 +1605,23 @@ extern char *set_bg_block(List results, int *start,
 	/* This midplane should have already been checked if it was in
 	   use or not */
 	list_append(results, ba_node);
-	if(conn_type == SELECT_SMALL) {
+	if(conn_type >= SELECT_SMALL) {
 		/* adding the ba_node and ending */
 		ba_node->used = true;
 		name = xstrdup_printf("%c%c%c",
 				      alpha_num[ba_node->coord[X]],
 				      alpha_num[ba_node->coord[Y]],
 				      alpha_num[ba_node->coord[Z]]);
+		if(ba_node->letter == '.') {
+			ba_node->letter = letters[color_count%62];
+			ba_node->color = colors[color_count%6];
+			debug3("count %d setting letter = %c "
+			       "color = %d",
+			       color_count,
+			       ba_node->letter,
+			       ba_node->color);
+			color_count++;
+		}
 		goto end_it; 
 	}
 	found = _find_x_path(results, ba_node,
@@ -1959,7 +2045,6 @@ extern char *bg_err_str(status_t inx)
 extern int set_bp_map(void)
 {
 #ifdef HAVE_BG_FILES
-	static my_bluegene_t *bg = NULL;
 	int rc;
 	rm_BP_t *my_bp = NULL;
 	ba_bp_map_t *bp_map = NULL;
@@ -1978,15 +2063,19 @@ extern int set_bp_map(void)
 		return -1;
 	}
 
+#ifdef HAVE_BGL
 	if (!getenv("DB2INSTANCE") || !getenv("VWSPATH")) {
-		fatal("Missing DB2INSTANCE or VWSPATH env var."
-			"Execute 'db2profile'");
+		fatal("Missing DB2INSTANCE or VWSPATH env var.  "
+		      "Execute 'db2profile'");
 		return -1;
 	}
+#endif
 	
-	if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
-		error("bridge_get_BG(): %d", rc);
-		return -1;
+	if (!bg) {
+		if((rc = bridge_get_bg(&bg)) != STATUS_OK) {
+			error("bridge_get_BG(): %d", rc);
+			return -1;
+		}
 	}
 	
 	if ((rc = bridge_get_data(bg, RM_BPNum, &bp_num)) != STATUS_OK) {
@@ -2052,10 +2141,6 @@ extern int set_bp_map(void)
 		
 		free(bp_id);		
 	}
-
-	if ((rc = bridge_free_bg(bg)) != STATUS_OK)
-		error("bridge_free_BG(): %s", rc);	
-	
 #endif
 	_bp_map_initialized = true;
 	return 1;
@@ -2070,17 +2155,40 @@ extern int *find_bp_loc(char* bp_id)
 #ifdef HAVE_BG_FILES
 	ba_bp_map_t *bp_map = NULL;
 	ListIterator itr;
-	
+	char *check = NULL;
+
 	if(!bp_map_list) {
 		if(set_bp_map() == -1)
 			return NULL;
 	}
+
+	check = xstrdup(bp_id);
+	/* with BGP they changed the names of the rack midplane action from
+	 * R000 to R00-M0 so we now support both formats for each of the
+	 * systems */
+#ifdef HAVE_BGL
+	if(check[3] == '-') {
+		if(check[5]) {
+			check[3] = check[5];
+			check[4] = '\0';
+		}
+	}
+#else
+	if(check[3] != '-') {
+		xfree(check);
+		check = xstrdup_printf("R%c%c-M%c",
+				       bp_id[1], bp_id[2], bp_id[3]);
+	}
+#endif
+
 	itr = list_iterator_create(bp_map_list);
-	while ((bp_map = list_next(itr)) != NULL)
-		if (!strcasecmp(bp_map->bp_id, bp_id)) 
+	while ((bp_map = list_next(itr)))  
+		if (!strcasecmp(bp_map->bp_id, check)) 
 			break;	/* we found it */
-	
 	list_iterator_destroy(itr);
+
+	xfree(check);
+
 	if(bp_map != NULL)
 		return bp_map->coord;
 	else
@@ -2599,6 +2707,42 @@ end_it:
 	
 }
 
+/* */
+extern int validate_coord(int *coord)
+{
+#ifdef HAVE_BG_FILES
+	if(coord[X]>=REAL_DIM_SIZE[X] 
+	   || coord[Y]>=REAL_DIM_SIZE[Y]
+	   || coord[Z]>=REAL_DIM_SIZE[Z]) {
+		error("got coord %c%c%c greater than system dims "
+		      "%c%c%c",
+		      alpha_num[coord[X]],
+		      alpha_num[coord[Y]],
+		      alpha_num[coord[Z]],
+		      alpha_num[REAL_DIM_SIZE[X]],
+		      alpha_num[REAL_DIM_SIZE[Y]],
+		      alpha_num[REAL_DIM_SIZE[Z]]);
+		return 0;
+	}
+
+	if(coord[X]>=DIM_SIZE[X] 
+	   || coord[Y]>=DIM_SIZE[Y]
+	   || coord[Z]>=DIM_SIZE[Z]) {
+		debug4("got coord %c%c%c greater than what we are using "
+		       "%c%c%c",
+		       alpha_num[coord[X]],
+		       alpha_num[coord[Y]],
+		       alpha_num[coord[Z]],
+		       alpha_num[DIM_SIZE[X]],
+		       alpha_num[DIM_SIZE[Y]],
+		       alpha_num[DIM_SIZE[Z]]);
+		return 0;
+	}
+#endif
+	return 1;
+}
+
+
 /********************* Local Functions *********************/
 
 #ifdef HAVE_BG
@@ -2727,7 +2871,9 @@ static int _append_geo(int *geometry, List geos, int rotate)
 	if(rotate) {
 		for (i = (BA_SYSTEM_DIMENSIONS - 1); i >= 0; i--) {
 			for (j = 1; j <= i; j++) {
-				if (geometry[j-1] > geometry[j]) {
+				if ((geometry[j-1] > geometry[j])
+				    && (geometry[j] <= DIM_SIZE[j-i])
+				    && (geometry[j-1] <= DIM_SIZE[j])) {
 					temp_geo = geometry[j-1];
 					geometry[j-1] = geometry[j];
 					geometry[j] = temp_geo;
@@ -2760,7 +2906,7 @@ static int _append_geo(int *geometry, List geos, int rotate)
 
 /*
  * Fill in the paths and extra midplanes we need for the block.
- * Basically copy the x path sent in with the start_list in each Y anx
+ * Basically copy the x path sent in with the start_list in each Y and
  * Z dimension filling in every midplane for the block and then
  * completing the Y and Z wiring, tying the whole block together.
  *
@@ -2795,14 +2941,12 @@ static int _fill_in_coords(List results, List start_list,
 		curr_switch = &check_node->axis_switch[X];
 	
 		for(y=0; y<geometry[Y]; y++) {
-			if((check_node->coord[Y]+y) 
-			   >= DIM_SIZE[Y]) {
+			if((check_node->coord[Y]+y) >= DIM_SIZE[Y]) {
 				rc = 0;
 				goto failed;
 			}
 			for(z=0; z<geometry[Z]; z++) {
-				if((check_node->coord[Z]+z) 
-				   >= DIM_SIZE[Z]) {
+				if((check_node->coord[Z]+z) >= DIM_SIZE[Z]) {
 					rc = 0;
 					goto failed;
 				}
@@ -2853,6 +2997,19 @@ static int _fill_in_coords(List results, List start_list,
 			goto failed;
 		}
 	}
+
+	if(deny_pass) {
+		if((*deny_pass & PASS_DENY_Y)
+		   && (*deny_pass & PASS_FOUND_Y)) {
+			debug("We don't allow Y passthoughs");
+			rc = 0;
+		} else if((*deny_pass & PASS_DENY_Z)
+		   && (*deny_pass & PASS_FOUND_Z)) {
+			debug("We don't allow Z passthoughs");
+			rc = 0;
+		}
+	}
+
 failed:
 	list_iterator_destroy(itr);				
 				
@@ -3056,7 +3213,16 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 					dim_curr_switch->int_wire[2].port_tar
 						= 0;
 					dim_curr_switch = dim_next_switch;
-									
+
+					if(deny_pass
+					   && (node_tar[i2] != first[i2])) {
+						if(i2 == 1) 
+							*deny_pass |=
+								PASS_FOUND_Y;
+						else 
+							*deny_pass |=
+								PASS_FOUND_Z;
+					}
 					while(node_tar[i2] != first[i2]) {
 						debug3("on dim %d at %d "
 						       "looking for %d",
@@ -3070,6 +3236,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 							       "here 3");
 							return 0;
 						} 
+						
 						dim_curr_switch->
 							int_wire[2].used = 1;
 						dim_curr_switch->
@@ -3147,7 +3314,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 				      geometry[i2], i2, count);
 				return 0;
 			}
-		} else if(geometry[i2] == 1) {
+		} else if((geometry[i2] == 1) && (conn_type == SELECT_TORUS)) {
 			/* FIX ME: This is put here because we got
 			   into a state where the Y dim was not being
 			   processed correctly.  This will set up the
@@ -3643,26 +3810,40 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			       ba_node_t* target)
 {
 #ifdef HAVE_BG_FILES
-	my_bluegene_t *bg = NULL;
+#ifdef HAVE_BGL
+
+#define UNDER_POS  7
+#define NODE_LEN 5
+#define VAL_NAME_LEN 12
+
+#else
+
+#define UNDER_POS  9
+#define NODE_LEN 7
+#define VAL_NAME_LEN 16
+
+#endif
 	int rc;
 	int i;
 	rm_wire_t *my_wire = NULL;
 	rm_port_t *my_port = NULL;
 	char *wire_id = NULL;
-	char from_node[5];
-	char to_node[5];
 	int from_port, to_port;
 	int wire_num;
 	int *coord;
+	char from_node[NODE_LEN];
+	char to_node[NODE_LEN];
 
 	if (!have_db2) {
 		error("Can't access DB2 library, run from service node");
 		return -1;
 	}
 	
-	if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
-		error("bridge_get_BG(): %d", rc);
-		return -1;
+	if (!bg) {
+		if((rc = bridge_get_bg(&bg)) != STATUS_OK) {
+			error("bridge_get_BG(): %d", rc);
+			return -1;
+		}
 	}
 		
 	if (bg == NULL) 
@@ -3700,7 +3881,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			continue;
 		}
 
-		if(wire_id[7] != '_') 
+		if(wire_id[UNDER_POS] != '_') 
 			continue;
 		switch(wire_id[0]) {
 		case 'X':
@@ -3713,17 +3894,17 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			dim = Z;
 			break;
 		}
-		if(strlen(wire_id)<12) {
+		if(strlen(wire_id) < VAL_NAME_LEN) {
 			error("Wire_id isn't correct %s",wire_id);
 			continue;
 		}
-		strncpy(from_node, wire_id+2, 4);
-		strncpy(to_node, wire_id+8, 4);
 		
+                memset(&from_node, 0, sizeof(from_node));
+                memset(&to_node, 0, sizeof(to_node));
+                strncpy(from_node, wire_id+2, NODE_LEN-1);
+                strncpy(to_node, wire_id+UNDER_POS+1, NODE_LEN-1);
 		free(wire_id);
-		
-		from_node[4] = '\0';
-		to_node[4] = '\0';
+
 		if ((rc = bridge_get_data(my_wire, RM_WireFromPort, &my_port))
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_FirstWire): %d", rc);
@@ -3750,20 +3931,9 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			error("1 find_bp_loc: bpid %s not known", from_node);
 			continue;
 		}
-		
-		if(coord[X]>=DIM_SIZE[X] 
-		   || coord[Y]>=DIM_SIZE[Y]
-		   || coord[Z]>=DIM_SIZE[Z]) {
-			error("got coord %c%c%c greater than system dims "
-			      "%c%c%c",
-			      alpha_num[coord[X]],
-			      alpha_num[coord[Y]],
-			      alpha_num[coord[Z]],
-			      alpha_num[DIM_SIZE[X]],
-			      alpha_num[DIM_SIZE[Y]],
-			      alpha_num[DIM_SIZE[Z]]);
+		if(!validate_coord(coord))
 			continue;
-		}
+
 		source = &ba_system_ptr->
 			grid[coord[X]][coord[Y]][coord[Z]];
 		coord = find_bp_loc(to_node);
@@ -3771,19 +3941,9 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			error("2 find_bp_loc: bpid %s not known", to_node);
 			continue;
 		}
-		if(coord[X]>=DIM_SIZE[X] 
-		   || coord[Y]>=DIM_SIZE[Y]
-		   || coord[Z]>=DIM_SIZE[Z]) {
-			error("got coord %c%c%c greater than system dims "
-			      "%c%c%c",
-			      alpha_num[coord[X]],
-			      alpha_num[coord[Y]],
-			      alpha_num[coord[Z]],
-			      alpha_num[DIM_SIZE[X]],
-			      alpha_num[DIM_SIZE[Y]],
-			      alpha_num[DIM_SIZE[Z]]);
+		if(!validate_coord(coord))
 			continue;
-		}
+
 		target = &ba_system_ptr->
 			grid[coord[X]][coord[Y]][coord[Z]];
 		_switch_config(source, 
@@ -3803,9 +3963,6 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 		       alpha_num[target->coord[Z]],
 		       _port_enum(to_port));
 	}
-	if ((rc = bridge_free_bg(bg)) != STATUS_OK)
-		error("bridge_free_BG(): %s", rc);
-	
 #else
 
 	_switch_config(source, source, dim, 0, 0);
@@ -4100,10 +4257,16 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 
 	/* we don't need to go any further */
 	if(x_size == 1) {
-		curr_switch->int_wire[source_port].used = 1;
-		curr_switch->int_wire[source_port].port_tar = target_port;
-		curr_switch->int_wire[target_port].used = 1;
-		curr_switch->int_wire[target_port].port_tar = source_port;
+		/* Only set this if Torus since mesh doesn't have any
+		 * connections in this path */
+		if(conn_type == SELECT_TORUS) {
+			curr_switch->int_wire[source_port].used = 1;
+			curr_switch->int_wire[source_port].port_tar = 
+				target_port;
+			curr_switch->int_wire[target_port].used = 1;
+			curr_switch->int_wire[target_port].port_tar = 
+				source_port;
+		}
 		return 1;
 	}
 
@@ -4204,6 +4367,11 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			} else if(found == x_size) {
 				debug2("Algo(%d) finishing the torus!", algo);
 
+				if(deny_pass && (*deny_pass & PASS_DENY_X)) {
+					info("we don't allow passthroughs 1");
+					return 0;
+				}
+
 				if(best_path)
 					list_flush(best_path);
 				else
@@ -4358,7 +4526,13 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			debug2("Algo(%d) yes found next free %d", algo,
 			       best_count);
 			node_tar = _set_best_path();
-			
+
+			if(deny_pass && (*deny_pass & PASS_DENY_X)
+			   && (*deny_pass & PASS_FOUND_X)) {
+				debug("We don't allow X passthoughs.");
+				return 0;
+			}
+
 			next_node = &ba_system_ptr->grid[node_tar[X]]
 #ifdef HAVE_3D
 				[node_tar[Y]]
@@ -4776,9 +4950,9 @@ static int *_set_best_path()
 
 	itr = list_iterator_create(best_path);
 	while((path_switch = (ba_path_switch_t*) list_next(itr))) {
-		if(passthrough && path_switch->in > 1 && path_switch->out > 1) {
-			*passthrough = true;
-			debug2("got a passthrough");
+		if(deny_pass && path_switch->in > 1 && path_switch->out > 1) {
+			*deny_pass |= PASS_FOUND_X;
+			debug2("got a passthrough in X");
 		}
 #ifdef HAVE_3D
 		debug3("mapping %c%c%c %d->%d",
@@ -4837,7 +5011,8 @@ static int _set_one_dim(int *start, int *end, int *coord)
 	return 1;
 }
 
-static void _destroy_geo(void *object) {
+static void _destroy_geo(void *object) 
+{
 	int *geo_ptr = (int *)object;
 	xfree(geo_ptr);
 }
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.h b/src/plugins/select/bluegene/block_allocator/block_allocator.h
index 39dc60aa9..45aea50ca 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.h
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.h
@@ -52,6 +52,16 @@
 #define BA_SYSTEM_DIMENSIONS 1
 #endif
 
+#define PASS_DENY_X 0x0001
+#define PASS_DENY_Y 0x0002
+#define PASS_DENY_Z 0x0004
+#define PASS_DENY_ALL 0x00ff
+
+#define PASS_FOUND_X 0x0100
+#define PASS_FOUND_Y 0x0200
+#define PASS_FOUND_Z 0x0400
+#define PASS_FOUND_ANY 0xff00
+
 extern bool _initialized;
 
 enum {X, Y, Z};
@@ -92,8 +102,11 @@ typedef struct {
 	int geometry[BA_SYSTEM_DIMENSIONS]; /* size of block in geometry */
 	char *linuximage;              /* LinuxImage for this block */
 	char *mloaderimage;            /* mloaderImage for this block */
-	bool passthrough;              /* filled in if there are
-					  passthroughs in the block created */
+	uint16_t deny_pass;            /* PASS_FOUND_* bits are set if
+					  there are passthroughs in the
+					  created block; you can deny
+					  passthroughs by setting the
+					  PASS_DENY_* bits */
 	int procs;                     /* Number of Real processors in
 					  block */
 	char *ramdiskimage;            /* RamDiskImage for this block */
@@ -249,6 +262,7 @@ typedef struct {
 } ba_bp_map_t;
 
 /* Global */
+extern my_bluegene_t *bg;
 extern List bp_map_list; /* list used for conversion from XYZ to Rack
 			  * midplane */
 extern char letters[62]; /* complete list of letters used in smap */
@@ -257,10 +271,14 @@ extern int DIM_SIZE[BA_SYSTEM_DIMENSIONS]; /* how many midplanes in
 					    * each dimension */
 extern s_p_options_t bg_conf_file_options[]; /* used to parse the
 					      * bluegene.conf file. */
+extern uint16_t ba_deny_pass;
 
 /* Translate a state enum to a readable string */
 extern char *bg_block_state_string(rm_partition_state_t state);
 
+/* must xfree return of this */
+extern char *ba_passthroughs_string(uint16_t passthrough);
+
 /* Parse a block request from the bluegene.conf file */
 extern int parse_blockreq(void **dest, slurm_parser_enum_t type,
 			  const char *key, const char *value, 
@@ -517,4 +535,8 @@ extern int load_block_wiring(char *bg_block_id);
  */
 extern List get_and_set_block_wiring(char *bg_block_id);
 
+/* make sure a node is in the system; return 1 if it is, 0 if not */
+extern int validate_coord(int *coord);
+
+
 #endif /* _BLOCK_ALLOCATOR_H_ */
diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.c b/src/plugins/select/bluegene/plugin/bg_block_info.c
index 94c7b012b..5bc20ac15 100644
--- a/src/plugins/select/bluegene/plugin/bg_block_info.c
+++ b/src/plugins/select/bluegene/plugin/bg_block_info.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  bg_block_info.c - bluegene block information from the db2 database.
  *
- *  $Id: bg_block_info.c 15717 2008-11-17 23:20:37Z da $
+ *  $Id: bg_block_info.c 17202 2009-04-09 16:56:23Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -64,6 +64,8 @@
 #include "src/common/xstring.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/api/job_info.h"
+#include "src/slurmctld/trigger_mgr.h"
+#include "src/slurmctld/locks.h"
 #include "bluegene.h"
 
 #define _DEBUG 0
@@ -87,7 +89,6 @@ static int _block_is_deallocating(bg_record_t *bg_record)
 
 	if(bg_record->modifying)
 		return SLURM_SUCCESS;
-
 	
 	user_name = xstrdup(bg_slurm_user_name);
 	if(remove_all_users(bg_record->bg_block_id, NULL) 
@@ -95,13 +96,12 @@ static int _block_is_deallocating(bg_record_t *bg_record)
 		error("Something happened removing "
 		      "users from block %s", 
 		      bg_record->bg_block_id);
-	} 
-	
+	} 	
 	
 	if(bg_record->target_name && bg_record->user_name) {
 		if(!strcmp(bg_record->target_name, user_name)) {
 			if(strcmp(bg_record->target_name, bg_record->user_name)
-			   || (jobid > -1)) {
+			   || (jobid > NO_JOB_RUNNING)) {
 				kill_job_struct_t *freeit =
 					xmalloc(sizeof(freeit));
 				freeit->jobid = jobid;
@@ -113,13 +113,6 @@ static int _block_is_deallocating(bg_record_t *bg_record)
 				      bg_record->bg_block_id,
 				      bg_record->user_name,
 				      jobid);
-
-				if(remove_from_bg_list(bg_job_block_list, 
-						       bg_record) 
-				   == SLURM_SUCCESS) {
-					num_unused_cpus += bg_record->bp_count
-						* bg_record->cpus_per_bp;
-				} 
 			} else {
 				debug("Block %s was in a ready state "
 				      "but is being freed. No job running.",
@@ -130,7 +123,6 @@ static int _block_is_deallocating(bg_record_t *bg_record)
 			      "for block %s.",
 			      bg_record->bg_block_id);
 		}
-		remove_from_bg_list(bg_booted_block_list, bg_record);
 	} else if(bg_record->user_name) {
 		error("Target Name was not set "
 		      "not set for block %s.",
@@ -144,6 +136,10 @@ static int _block_is_deallocating(bg_record_t *bg_record)
 		bg_record->target_name = xstrdup(bg_record->user_name);
 	}
 
+	if(remove_from_bg_list(bg_job_block_list, bg_record) == SLURM_SUCCESS) 
+		num_unused_cpus += bg_record->cpu_cnt;			       
+	remove_from_bg_list(bg_booted_block_list, bg_record);
+
 	xfree(user_name);
 			
 	return SLURM_SUCCESS;
@@ -201,6 +197,8 @@ extern int block_ready(struct job_record *job_ptr)
 		xfree(block_id);
 	} else
 		rc = READY_JOB_ERROR;
+/* 	info("returning %d for job %u %d %d", */
+/* 	     rc, job_ptr->job_id, READY_JOB_ERROR, READY_JOB_FATAL); */
 	return rc;
 }				
 
@@ -215,8 +213,8 @@ extern void pack_block(bg_record_t *bg_record, Buf buffer)
 	pack16((uint16_t)bg_record->conn_type, buffer);
 #ifdef HAVE_BGL
 	pack16((uint16_t)bg_record->node_use, buffer);	
-	pack16((uint16_t)bg_record->quarter, buffer);	
-	pack16((uint16_t)bg_record->nodecard, buffer);	
+	pack16((uint16_t)0, buffer);	
+	pack16((uint16_t)0, buffer);	
 #endif
 	pack32((uint32_t)bg_record->node_cnt, buffer);
 	pack_bit_fmt(bg_record->bitmap, buffer);
@@ -244,6 +242,8 @@ extern int update_block_list()
 	time_t now;
 	kill_job_struct_t *freeit = NULL;
 	ListIterator itr = NULL;
+	slurmctld_lock_t job_write_lock = {
+		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
 	
 	if(!kill_job_list)
 		kill_job_list = list_create(_destroy_kill_struct);
@@ -300,6 +300,50 @@ extern int update_block_list()
 			bg_record->node_use = node_use;
 			updated = 1;
 		}
+#else
+		if((bg_record->node_cnt < bluegene_bp_node_cnt) 
+		   || (bluegene_bp_node_cnt == bluegene_nodecard_node_cnt)) {
+			char *mode = NULL;
+			uint16_t conn_type = SELECT_SMALL;
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionOptions,
+						  &mode))
+			    != STATUS_OK) {
+				error("bridge_get_data(RM_PartitionOptions): "
+				      "%s", bg_err_str(rc));
+				updated = -1;
+				goto next_block;
+			} else if(mode) {
+				switch(mode[0]) {
+				case 's':
+					conn_type = SELECT_HTC_S;
+					break;
+				case 'd':
+					conn_type = SELECT_HTC_D;
+					break;
+				case 'v':
+					conn_type = SELECT_HTC_V;
+					break;
+				case 'l':
+					conn_type = SELECT_HTC_L;
+					break;
+				default:
+					conn_type = SELECT_SMALL;
+					break;
+				}
+				free(mode);
+			}
+			
+			if(bg_record->conn_type != conn_type) {
+				debug("mode of small Block %s was %u "
+				      "and now is %u",
+				      bg_record->bg_block_id, 
+				      bg_record->conn_type, 
+				      conn_type);
+				bg_record->conn_type = conn_type;
+				updated = 1;
+			}
+		}
 #endif		
 		if ((rc = bridge_get_data(block_ptr, RM_PartitionState,
 					  &state))
@@ -320,18 +364,56 @@ extern int update_block_list()
 			   check to make sure block went 
 			   through freeing correctly 
 			*/
-			if(bg_record->state != RM_PARTITION_DEALLOCATING
+			if((bg_record->state != RM_PARTITION_DEALLOCATING
+			    && bg_record->state != RM_PARTITION_ERROR)
 			   && state == RM_PARTITION_FREE)
 				skipped_dealloc = 1;
+			else if((bg_record->state == RM_PARTITION_READY)
+				&& (state == RM_PARTITION_CONFIGURING)) {
+				/* This means the user did a reboot through
+				   mpirun but we missed the state
+				   change */
+				debug("Block %s skipped rebooting, "
+				      "but it really is.  "
+				      "Setting target_name back to %s",
+				      bg_record->bg_block_id,
+				      bg_record->user_name);
+				xfree(bg_record->target_name);
+				bg_record->target_name =
+					xstrdup(bg_record->user_name);
+			}
 
 			bg_record->state = state;
 
 			if(bg_record->state == RM_PARTITION_DEALLOCATING
-			   || skipped_dealloc) {
+			   || skipped_dealloc) 
 				_block_is_deallocating(bg_record);
-			} else if(bg_record->state == RM_PARTITION_CONFIGURING)
+#ifndef HAVE_BGL
+			else if(bg_record->state == RM_PARTITION_REBOOTING) {
+				/* This means the user did a reboot through
+				   mpirun */
+				debug("Block %s rebooting.  "
+				      "Setting target_name back to %s",
+				      bg_record->bg_block_id,
+				      bg_record->user_name);
+				xfree(bg_record->target_name);
+				bg_record->target_name =
+					xstrdup(bg_record->user_name);
+			}
+#endif
+			else if(bg_record->state == RM_PARTITION_CONFIGURING) 
 				bg_record->boot_state = 1;
+			else if(bg_record->state == RM_PARTITION_FREE) {
+				if(remove_from_bg_list(bg_job_block_list, 
+						       bg_record) 
+				   == SLURM_SUCCESS) {
+					num_unused_cpus += bg_record->cpu_cnt;
+				}
+				remove_from_bg_list(bg_booted_block_list,
+						    bg_record);
+			} 
 			updated = 1;
+			
 		}
 
 		/* check the boot state */
@@ -350,7 +432,33 @@ extern int update_block_list()
 				
 				break;
 			case RM_PARTITION_ERROR:
-				error("block in an error state");
+				bg_record->boot_state = 0;
+				bg_record->boot_count = 0;
+				if(bg_record->job_running > NO_JOB_RUNNING) {
+					error("Block %s in an error "
+					      "state while booting.  "
+					      "Failing job %u.",
+					      bg_record->bg_block_id,
+					      bg_record->job_running);
+					freeit = xmalloc(
+						sizeof(kill_job_struct_t));
+					freeit->jobid = bg_record->job_running;
+					list_push(kill_job_list, freeit);
+					if(remove_from_bg_list(
+						   bg_job_block_list, 
+						   bg_record) 
+					   == SLURM_SUCCESS) {
+						num_unused_cpus += 
+							bg_record->cpu_cnt;
+					} 
+				} else 
+					error("block %s in an error "
+					      "state while booting.",
+					      bg_record->bg_block_id);
+				remove_from_bg_list(bg_booted_block_list,
+						    bg_record);
+				trigger_block_error();
+				break;
 			case RM_PARTITION_FREE:
 				if(bg_record->boot_count < RETRY_BOOT_COUNT) {
 					slurm_mutex_unlock(&block_state_mutex);
@@ -386,6 +494,16 @@ extern int update_block_list()
 					slurm_mutex_lock(&block_state_mutex);
 					bg_record->boot_state = 0;
 					bg_record->boot_count = 0;
+					if(remove_from_bg_list(
+						   bg_job_block_list, 
+						   bg_record) 
+					   == SLURM_SUCCESS) {
+						num_unused_cpus += 
+							bg_record->cpu_cnt;
+					} 
+					remove_from_bg_list(
+						bg_booted_block_list,
+						bg_record);
 				}
 				break;
 			case RM_PARTITION_READY:
@@ -431,8 +549,15 @@ extern int update_block_list()
 	
 	/* kill all the jobs from unexpectedly freed blocks */
 	while((freeit = list_pop(kill_job_list))) {
-		debug2("killing job %d", freeit->jobid);
-		(void) slurm_fail_job(freeit->jobid);
+		debug2("Trying to requeue job %d", freeit->jobid);
+		lock_slurmctld(job_write_lock);
+		if((rc = job_requeue(0, freeit->jobid, -1))) {
+			error("couldn't requeue job %u, failing it: %s",
+			      freeit->jobid, 
+			      slurm_strerror(rc));
+			(void) job_fail(freeit->jobid);
+		}
+		unlock_slurmctld(job_write_lock);
 		_destroy_kill_struct(freeit);
 	}
 		
@@ -501,6 +626,7 @@ extern int update_freeing_block_list()
 			      state);
 
 			bg_record->state = state;
+			updated = 1;
 		}
 	next_block:
 		if ((rc = bridge_free_block(block_ptr)) 
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index 432548daf..abd4c48af 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -2,7 +2,7 @@
  *  bg_job_place.c - blue gene job placement (e.g. base block selection)
  *  functions.
  *
- *  $Id: bg_job_place.c 15759 2008-11-21 23:38:34Z da $ 
+ *  $Id: bg_job_place.c 17205 2009-04-09 17:24:11Z da $ 
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -137,13 +137,16 @@ static void _rotate_geo(uint16_t *req_geometry, int rot_cnt)
  */
 static int _bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 {
-	int size_a = rec_a->node_cnt;
-	int size_b = rec_b->node_cnt;
-
-	if(rec_a->job_ptr && !rec_b->job_ptr)
+	if((rec_a->job_running == BLOCK_ERROR_STATE) 
+	   && (rec_b->job_running != BLOCK_ERROR_STATE))
+		return 1;
+	else if((rec_a->job_running != BLOCK_ERROR_STATE) 
+	   && (rec_b->job_running == BLOCK_ERROR_STATE))
 		return -1;
 	else if(!rec_a->job_ptr && rec_b->job_ptr)
 		return 1;
+	else if(rec_a->job_ptr && !rec_b->job_ptr)
+		return -1;
 	else if(rec_a->job_ptr && rec_b->job_ptr) {
 		if(rec_a->job_ptr->start_time > rec_b->job_ptr->start_time)
 			return 1;
@@ -151,29 +154,7 @@ static int _bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 			return -1;
 	}
 
-	if (size_a < size_b)
-		return -1;
-	else if (size_a > size_b)
-		return 1;
-	if(rec_a->nodes && rec_b->nodes) {
-		size_a = strcmp(rec_a->nodes, rec_b->nodes);
-		if (size_a < 0)
-			return -1;
-		else if (size_a > 0)
-			return 1;
-	}
-#ifdef HAVE_BGL
-	if (rec_a->quarter < rec_b->quarter)
-		return -1;
-	else if (rec_a->quarter > rec_b->quarter)
-		return 1;
-
-	if(rec_a->nodecard < rec_b->nodecard)
-		return -1;
-	else if(rec_a->nodecard > rec_b->nodecard)
-		return 1;
-#endif
-	return 0;
+	return bg_record_cmpf_inc(rec_a, rec_b);
 }
 
 /* 
@@ -184,10 +165,16 @@ static int _bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b)
  */
 static int _bg_record_sort_aval_dec(bg_record_t* rec_a, bg_record_t* rec_b)
 {
-	if(rec_a->job_ptr && !rec_b->job_ptr)
+	if((rec_a->job_running == BLOCK_ERROR_STATE) 
+	   && (rec_b->job_running != BLOCK_ERROR_STATE))
+		return -1;
+	else if((rec_a->job_running != BLOCK_ERROR_STATE) 
+	   && (rec_b->job_running == BLOCK_ERROR_STATE))
 		return 1;
 	else if(!rec_a->job_ptr && rec_b->job_ptr)
 		return -1;
+	else if(rec_a->job_ptr && !rec_b->job_ptr)
+		return 1;
 	else if(rec_a->job_ptr && rec_b->job_ptr) {
 		if(rec_a->job_ptr->start_time > rec_b->job_ptr->start_time)
 			return -1;
@@ -363,7 +350,6 @@ static bg_record_t *_find_matching_block(List block_list,
 {
 	bg_record_t *bg_record = NULL;
 	ListIterator itr = NULL;
-	uint32_t proc_cnt = 0;
 	char tmp_char[256];
 	
 	debug("number of blocks to check: %d state %d", 
@@ -377,8 +363,10 @@ static bg_record_t *_find_matching_block(List block_list,
 		*/
 		debug3("%s job_running = %d", 
 		       bg_record->bg_block_id, bg_record->job_running);
-		/*block is messed up some how (BLOCK_ERROR_STATE) ignore it*/
-		if(bg_record->job_running == BLOCK_ERROR_STATE) {
+		/*block is messed up some how (BLOCK_ERROR_STATE)
+		 * ignore it or if state == RM_PARTITION_ERROR */
+		if((bg_record->job_running == BLOCK_ERROR_STATE)
+		   || (bg_record->state == RM_PARTITION_ERROR)) {
 			debug("block %s is in an error state (can't use)", 
 			      bg_record->bg_block_id);			
 			continue;
@@ -395,15 +383,15 @@ static bg_record_t *_find_matching_block(List block_list,
 		}
 
 		/* Check processor count */
-		proc_cnt = bg_record->bp_count * bg_record->cpus_per_bp;
 		debug3("asking for %u-%u looking at %d", 
-		       request->procs, max_procs, proc_cnt);
-		if ((proc_cnt < request->procs)
-		    || ((max_procs != NO_VAL) && (proc_cnt > max_procs))) {
+		       request->procs, max_procs, bg_record->cpu_cnt);
+		if ((bg_record->cpu_cnt < request->procs)
+		    || ((max_procs != NO_VAL)
+			&& (bg_record->cpu_cnt > max_procs))) {
 			/* We use the proccessor count per block here
 			   mostly to see if we can run on a smaller block. 
 			 */
-			convert_num_unit((float)proc_cnt, tmp_char, 
+			convert_num_unit((float)bg_record->cpu_cnt, tmp_char, 
 					 sizeof(tmp_char), UNIT_NONE);
 			debug("block %s CPU count (%s) not suitable",
 			      bg_record->bg_block_id, 
@@ -477,6 +465,21 @@ static bg_record_t *_find_matching_block(List block_list,
 		/***********************************************/
 		if ((request->conn_type != bg_record->conn_type)
 		    && (request->conn_type != SELECT_NAV)) {
+#ifndef HAVE_BGL
+			if(request->conn_type >= SELECT_SMALL) {
+				/* we only want to reboot blocks if
+				   they have to be so skip booted
+				   blocks if in small state
+				*/
+				if(check_image 
+				   && (bg_record->state
+				       == RM_PARTITION_READY)) {
+					*allow = 1;
+					continue;			
+				} 
+				goto good_conn_type;
+			} 
+#endif
 			debug("bg block %s conn-type not usable asking for %s "
 			      "bg_record is %s", 
 			      bg_record->bg_block_id,
@@ -484,7 +487,9 @@ static bg_record_t *_find_matching_block(List block_list,
 			      convert_conn_type(bg_record->conn_type));
 			continue;
 		} 
-
+#ifndef HAVE_BGL
+		good_conn_type:
+#endif
 		/*****************************************/
 		/* match up geometry as "best" possible  */
 		/*****************************************/
@@ -611,9 +616,12 @@ static int _check_for_booted_overlapping_blocks(
 				}
 			}
 
-			if(found_record->job_running != NO_JOB_RUNNING) {
-				if(found_record->job_running
-				   == BLOCK_ERROR_STATE)
+			if((found_record->job_running != NO_JOB_RUNNING) 
+			   || (found_record->state == RM_PARTITION_ERROR)) {
+				if((found_record->job_running
+				    == BLOCK_ERROR_STATE)
+				   || (found_record->state
+				       == RM_PARTITION_ERROR))
 					error("can't use %s, "
 					      "overlapping block %s "
 					      "is in an error state.",
@@ -651,12 +659,26 @@ static int _check_for_booted_overlapping_blocks(
 					}
 					destroy_bg_record(bg_record);
 					if(!found_record) {
-						debug2("This record wasn't "
-						       "found in the bg_list, "
-						       "no big deal, it "
-						       "probably wasn't added");
+						/* There may be a bug
+						   here where on a real
+						   system we don't go
+						   destroy this block
+						   in the real system.
+						   If that is the case we
+						   need to add the
+						   bg_record to the
+						   free_block_list
+						   instead of destroying
+						   it like above.
+						*/ 
+						debug("This record wasn't "
+						      "found in the bg_list, "
+						      "no big deal, it "
+						      "probably wasn't added");
 						//rc = SLURM_ERROR;
 					} else {
+						debug("removing the block "
+						      "from the system");
 						List temp_list =
 							list_create(NULL);
 						list_push(temp_list, 
@@ -728,7 +750,8 @@ static int _dynamically_request(List block_list, int *blocks_added,
 		*/
 		debug("trying with %d", create_try);
 		if((new_blocks = create_dynamic_block(block_list,
-						      request, temp_list))) {
+						      request, temp_list,
+						      true))) {
 			bg_record_t *bg_record = NULL;
 			while((bg_record = list_pop(new_blocks))) {
 				if(block_exist_in_list(block_list, bg_record))
@@ -942,12 +965,15 @@ static int _find_best_block_match(List block_list,
 	*found_bg_record = NULL;
 	allow = 0;
 
+	memset(&request, 0, sizeof(ba_request_t));
+
 	for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
 		request.start[i] = start[i];
 	
 	for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
 		request.geometry[i] = req_geometry[i];
-	
+
+	request.deny_pass = (uint16_t)NO_VAL;
 	request.save_name = NULL;
 	request.elongate_geos = NULL;
 	request.size = target_size;
@@ -1022,10 +1048,8 @@ static int _find_best_block_match(List block_list,
 					      "block %s in an error state "
 					      "because of bad bps.",
 					      bg_record->bg_block_id);
-					bg_record->job_running =
-						BLOCK_ERROR_STATE;
-					bg_record->state = RM_PARTITION_ERROR;
-					trigger_block_error();
+					put_block_in_error_state(
+						bg_record, BLOCK_ERROR_STATE);
 					continue;
 				}
 			}
@@ -1087,20 +1111,46 @@ static int _find_best_block_match(List block_list,
 			slurm_mutex_unlock(&block_state_mutex);
 			list_sort(job_list, (ListCmpF)_bg_record_sort_aval_inc);
 			while(1) {
+				bool track_down_nodes = true;
 				/* this gets altered in
 				 * create_dynamic_block so we reset it */
 				for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
 					request.geometry[i] = req_geometry[i];
 
 				bg_record = list_pop(job_list);
-				if(bg_record)
-					debug2("taking off %d(%s) started at %d ends at %d",
-					       bg_record->job_running,
-					       bg_record->bg_block_id,
-					       bg_record->job_ptr->start_time,
-					       bg_record->job_ptr->end_time);
+				if(bg_record) {
+					if(bg_record->job_ptr)
+						debug2("taking off %d(%s) "
+						       "started at %d "
+						       "ends at %d",
+						       bg_record->job_running,
+						       bg_record->bg_block_id,
+						       bg_record->job_ptr->
+						       start_time,
+						       bg_record->job_ptr->
+						       end_time);
+					else if(bg_record->job_running
+						== BLOCK_ERROR_STATE)
+						/* job_ptr may be NULL in
+						 * this branch, so only
+						 * pass the block id that
+						 * the single %s expects.
+						 */
+						debug2("taking off (%s) "
+						       "which is in an error "
+						       "state",
+						       bg_record->bg_block_id);
+				} else 
+					/* This means we didn't have
+					   any jobs to take off
+					   anymore so we are making
+					   sure we can look at every
+					   node on the system.
+					*/
+					track_down_nodes = false;
 				if(!(new_blocks = create_dynamic_block(
-					     block_list, &request, job_list))) {
+					     block_list, &request, job_list,
+					     track_down_nodes))) {
 					destroy_bg_record(bg_record);
 					if(errno == ESLURM_INTERCONNECT_FAILURE
 					   || !list_count(job_list)) {
@@ -1239,7 +1289,7 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 #ifdef HAVE_BG
 	bg_record_t* bg_record = NULL;
 	char buf[100];
-	uint16_t tmp16 = (uint16_t)NO_VAL;
+	uint16_t conn_type = (uint16_t)NO_VAL;
 	List block_list = NULL;
 	int blocks_added = 0;
 	time_t starttime = time(NULL);
@@ -1257,6 +1307,33 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 
 	job_block_test_list = bg_job_block_list;
 	
+	select_g_get_jobinfo(job_ptr->select_jobinfo,
+			     SELECT_DATA_CONN_TYPE, &conn_type);
+	if(conn_type == SELECT_NAV) {
+		uint32_t max_procs = (uint32_t)NO_VAL;
+		if(bluegene_bp_node_cnt == bluegene_nodecard_node_cnt)
+			conn_type = SELECT_SMALL;
+		else if(min_nodes > 1) {
+			conn_type = SELECT_TORUS;
+			/* make sure the max procs are set to NO_VAL */
+			select_g_set_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_MAX_PROCS,
+					     &max_procs);
+
+		} else {
+			select_g_get_jobinfo(job_ptr->select_jobinfo,
+					     SELECT_DATA_MAX_PROCS,
+					     &max_procs);
+			if((max_procs > procs_per_node)
+			   || (max_procs == NO_VAL))
+				conn_type = SELECT_TORUS;
+			else
+				conn_type = SELECT_SMALL;
+		}
+		select_g_set_jobinfo(job_ptr->select_jobinfo,
+				     SELECT_DATA_CONN_TYPE,
+				     &conn_type);
+	}
 	select_g_sprint_jobinfo(job_ptr->select_jobinfo, buf, sizeof(buf), 
 				SELECT_PRINT_MIXED);
 	debug("bluegene:submit_job: %s nodes=%u-%u-%u", 
@@ -1311,7 +1388,8 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 				else
 					starttime =
 						bg_record->job_ptr->end_time;
-			}
+			} else if(bg_record->job_running == BLOCK_ERROR_STATE)
+				starttime = INFINITE;
 						
 			job_ptr->start_time = starttime;
 			
@@ -1325,22 +1403,14 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 			if(!bg_record->bg_block_id) {
 				uint16_t geo[BA_SYSTEM_DIMENSIONS];
 				
-				debug2("%d can start job at "
-				       "%u on %s on unmade block",
-				       test_only, starttime,
+				debug2("%d can start unassigned job %u at "
+				       "%u on %s",
+				       test_only, job_ptr->job_id, starttime,
 				       bg_record->nodes);
 				select_g_set_jobinfo(job_ptr->select_jobinfo,
 					     SELECT_DATA_BLOCK_ID,
 					     "unassigned");
-				/* if(job_ptr->num_procs < bluegene_bp_node_cnt  */
-/* 				   && job_ptr->num_procs > 0) { */
-/* 					i = procs_per_node/job_ptr->num_procs; */
-/* 					debug2("divide by %d", i); */
-/* 				} else  */
-/* 					i = 1; */
-/* 				min_nodes *= bluegene_bp_node_cnt/i; */
-				/* this seems to do the same thing as
-				 * above */
+
 				min_nodes = bg_record->node_cnt;
 				select_g_set_jobinfo(job_ptr->select_jobinfo,
 					     SELECT_DATA_NODE_CNT,
@@ -1360,10 +1430,11 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 					error("Small block used in "
 					      "non-shared partition");
 				
-				debug2("%d can start job at %u on %s",
-				       test_only, starttime,
+				debug2("%d can start job %u at %u on %s(%s)",
+				       test_only, job_ptr->job_id, starttime,
+				       bg_record->bg_block_id,
 				       bg_record->nodes);
-
+				
 				select_g_set_jobinfo(job_ptr->select_jobinfo,
 						     SELECT_DATA_BLOCK_ID,
 						     bg_record->bg_block_id);
@@ -1374,10 +1445,10 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 						     SELECT_DATA_GEOMETRY, 
 						     &bg_record->geo);
 
-				tmp16 = bg_record->conn_type;
-				select_g_set_jobinfo(job_ptr->select_jobinfo,
-						     SELECT_DATA_CONN_TYPE, 
-						     &tmp16);
+				/* tmp16 = bg_record->conn_type; */
+/* 				select_g_set_jobinfo(job_ptr->select_jobinfo, */
+/* 						     SELECT_DATA_CONN_TYPE,  */
+/* 						     &tmp16); */
 			}
 		} else {
 			error("we got a success, but no block back");
@@ -1424,12 +1495,41 @@ extern int test_job_list(List req_list)
 
 	itr = list_iterator_create(req_list);
 	while((will_run = list_next(itr))) {
+		uint16_t conn_type = (uint16_t)NO_VAL;
+
 		if(!will_run->job_ptr) {
 			error("test_job_list: you need to give me a job_ptr");
 			rc = SLURM_ERROR;
 			break;
 		}
 		
+		select_g_get_jobinfo(will_run->job_ptr->select_jobinfo,
+				     SELECT_DATA_CONN_TYPE, &conn_type);
+		if(conn_type == SELECT_NAV) {
+			uint32_t max_procs = (uint32_t)NO_VAL;
+			if(will_run->min_nodes > 1) {
+				conn_type = SELECT_TORUS;
+				/* make sure the max procs are set to NO_VAL */
+				select_g_set_jobinfo(
+					will_run->job_ptr->select_jobinfo,
+					SELECT_DATA_MAX_PROCS,
+					&max_procs);
+				
+			} else {
+				select_g_get_jobinfo(
+					will_run->job_ptr->select_jobinfo,
+					SELECT_DATA_MAX_PROCS,
+					&max_procs);
+				if((max_procs > procs_per_node)
+				   || (max_procs == NO_VAL))
+					conn_type = SELECT_TORUS;
+				else
+					conn_type = SELECT_SMALL;
+			}
+			select_g_set_jobinfo(will_run->job_ptr->select_jobinfo,
+					     SELECT_DATA_CONN_TYPE,
+					     &conn_type);
+		}
 		select_g_sprint_jobinfo(will_run->job_ptr->select_jobinfo,
 					buf, sizeof(buf), 
 					SELECT_PRINT_MIXED);
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/plugin/bg_job_run.c
index 7388b79f4..800a6813d 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_run.c
@@ -2,7 +2,7 @@
  *  bg_job_run.c - blue gene job execution (e.g. initiation and termination) 
  *  functions.
  *
- *  $Id: bg_job_run.c 16146 2009-01-06 18:20:48Z da $ 
+ *  $Id: bg_job_run.c 17202 2009-04-09 16:56:23Z da $ 
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -76,6 +76,10 @@ typedef struct bg_update {
 	struct job_record *job_ptr;	/* pointer to job running on
 					 * block or NULL if no job */
 	uint16_t reboot;	/* reboot block before starting job */
+#ifndef HAVE_BGL
+	uint16_t conn_type;     /* needed to boot small blocks into
+				   HTC mode or not */
+#endif
 	pm_partition_id_t bg_block_id;
 	char *blrtsimage;       /* BlrtsImage for this block */
 	char *linuximage;       /* LinuxImage for this block */
@@ -87,6 +91,7 @@ static List bg_update_list = NULL;
 
 static pthread_mutex_t agent_cnt_mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t job_start_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t agent_cond = PTHREAD_COND_INITIALIZER;
 static int agent_cnt = 0;
 
 #ifdef HAVE_BG_FILES
@@ -109,16 +114,17 @@ static void	_term_agent(bg_update_t *bg_update_ptr);
 /* Kill a job and remove its record from MMCS */
 static int _remove_job(db_job_id_t job_id)
 {
-	int i, rc;
+	int rc;
+	int count = 0;
 	rm_job_t *job_rec = NULL;
 	rm_job_state_t job_state;
 
 	debug("removing job %d from MMCS", job_id);
-	for (i=0; i<MAX_POLL_RETRIES; i++) {
-		if (i > 0)
+	while(1) {
+		if (count)
 			sleep(POLL_INTERVAL);
+		count++;
 
-		
 		/* Find the job */
 		if ((rc = bridge_get_job(job_id, &job_rec)) != STATUS_OK) {
 			
@@ -153,36 +159,45 @@ static int _remove_job(db_job_id_t job_id)
 		/* check the state and process accordingly */
 		if(job_state == RM_JOB_TERMINATED)
 			return STATUS_OK;
-		else if(job_state == RM_JOB_DYING)
+		else if(job_state == RM_JOB_DYING) {
+			/* warn once the retry limit is exceeded */
+			if(count > MAX_POLL_RETRIES)
+				error("Job %d isn't dying, trying for %d "
+				      "seconds", job_id, count*POLL_INTERVAL);
 			continue;
-		else if(job_state == RM_JOB_ERROR) {
+		} else if(job_state == RM_JOB_ERROR) {
 			error("job %d is in a error state.", job_id);
 			
 			//free_bg_block();
 			return STATUS_OK;
 		}
 
-		(void) bridge_signal_job(job_id, SIGKILL);
-		rc = bridge_cancel_job(job_id);
-		/* it doesn't appear that this does anything. */
-		// rc = bridge_remove_job(job_id);
+		/* we have been told the next 2 lines do the same
+		 * thing, but I don't believe it to be true.  In most
+		 * cases when you do a signal of SIGTERM the mpirun
+		 * process gets killed with a SIGTERM.  In the case of
+		 * bridge_cancel_job it always gets killed with a
+		 * SIGKILL.  From IBM's point of view that is a bad
+		 * deal, so we are going to use signal ;).
+		 */
+
+//		 rc = bridge_cancel_job(job_id);
+		 rc = bridge_signal_job(job_id, SIGTERM);
 
 		if (rc != STATUS_OK) {
 			if (rc == JOB_NOT_FOUND) {
 				debug("job %d removed from MMCS", job_id);
 				return STATUS_OK;
-			} 
+			}
 			if(rc == INCOMPATIBLE_STATE)
 				debug("job %d is in an INCOMPATIBLE_STATE",
 				      job_id);
 			else
-				error("bridge_cancel_job(%d): %s", job_id, 
+				error("bridge_cancel_job(%d): %s", job_id,
 				      bg_err_str(rc));
 		}
 	}
-	/* try once more... */
-	/* it doesn't appear that this does anything. */
-	// (void) bridge_remove_job(job_id);
+
 	error("Failed to remove job %d from MMCS", job_id);
 	return INTERNAL_ERROR;
 }
@@ -197,20 +212,17 @@ static int _reset_block(bg_record_t *bg_record)
 			bg_record->job_running = NO_JOB_RUNNING;
 			bg_record->job_ptr = NULL;
 		}
-		/* remove user from list */
-		
+		/* remove user from list */		
 		
 		if(bg_record->target_name) {
-			if(strcmp(bg_record->target_name, 
-				  bg_slurm_user_name)) {
+			if(strcmp(bg_record->target_name, bg_slurm_user_name)) {
 				xfree(bg_record->target_name);
 				bg_record->target_name = 
 					xstrdup(bg_slurm_user_name);
 			}
 			update_block_user(bg_record, 1);
 		} else {
-			bg_record->target_name = 
-				xstrdup(bg_slurm_user_name);
+			bg_record->target_name = xstrdup(bg_slurm_user_name);
 		}	
 		
 			
@@ -220,8 +232,7 @@ static int _reset_block(bg_record_t *bg_record)
 		last_bg_update = time(NULL);
 		if(remove_from_bg_list(bg_job_block_list, bg_record) 
 		   == SLURM_SUCCESS) {
-			num_unused_cpus += 
-				bg_record->bp_count*bg_record->cpus_per_bp;
+			num_unused_cpus += bg_record->cpu_cnt;
 		}
 	} else {
 		error("No block given to reset");
@@ -261,11 +272,11 @@ static void _sync_agent(bg_update_t *bg_update_ptr)
 	bg_record->job_running = bg_update_ptr->job_ptr->job_id;
 	bg_record->job_ptr = bg_update_ptr->job_ptr;
 
-	if(!block_exist_in_list(bg_job_block_list, bg_record)) {
+	if(!block_ptr_exist_in_list(bg_job_block_list, bg_record)) {
 		list_push(bg_job_block_list, bg_record);
-		num_unused_cpus -= bg_record->bp_count*bg_record->cpus_per_bp;
+		num_unused_cpus -= bg_record->cpu_cnt;
 	}
-	if(!block_exist_in_list(bg_booted_block_list, bg_record)) 
+	if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record)) 
 		list_push(bg_booted_block_list, bg_record);
 	slurm_mutex_unlock(&block_state_mutex);
 
@@ -314,8 +325,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 
 	slurm_mutex_lock(&job_start_mutex);
 		
-	bg_record = 
-		find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
+	bg_record = find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
 
 	if(!bg_record) {
 		error("block %s not found in bg_list",
@@ -339,6 +349,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	}
 	slurm_mutex_lock(&block_state_mutex);
 	if(bg_record->job_running <= NO_JOB_RUNNING) {
+		// _reset_block(bg_record); should have already happened
 		slurm_mutex_unlock(&block_state_mutex);
 		slurm_mutex_unlock(&job_start_mutex);
 		debug("job %u finished during the queueing job "
@@ -350,6 +361,9 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 		slurm_mutex_unlock(&block_state_mutex);
 		debug("Block is in Deallocating state, waiting for free.");
 		bg_free_block(bg_record);
+		/* no reason to reboot here since we are already
+		   deallocating */
+		bg_update_ptr->reboot = 0;
 	} else 
 		slurm_mutex_unlock(&block_state_mutex);
 
@@ -357,8 +371,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	delete_list = list_create(NULL);
 	slurm_mutex_lock(&block_state_mutex);
 	itr = list_iterator_create(bg_list);
-	while ((found_record = (bg_record_t*) 
-		list_next(itr)) != NULL) {
+	while ((found_record = list_next(itr))) {
 		if ((!found_record) || (bg_record == found_record))
 			continue;
 		
@@ -438,6 +451,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	
 	slurm_mutex_lock(&block_state_mutex);
 	if(bg_record->job_running <= NO_JOB_RUNNING) {
+		// _reset_block(bg_record); should have already happened
 		slurm_mutex_unlock(&block_state_mutex);
 		slurm_mutex_unlock(&job_start_mutex);
 		debug("job %u already finished before boot",
@@ -455,6 +469,13 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 		bg_record->blrtsimage = xstrdup(bg_update_ptr->blrtsimage);
 		rc = 1;
 	}
+#else 
+	if((bg_update_ptr->conn_type >= SELECT_SMALL) 
+		&& (bg_update_ptr->conn_type != bg_record->conn_type)) {
+		debug3("changing small block mode from %u to %u",
+		       bg_record->conn_type, bg_update_ptr->conn_type);
+		rc = 1;
+	}
 #endif
 	if(bg_update_ptr->linuximage
 	   && strcasecmp(bg_update_ptr->linuximage, bg_record->linuximage)) {
@@ -538,6 +559,33 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 			error("bridge_modify_block(RM_MODIFY_IoloadImg)", 
 			      bg_err_str(rc));
 
+		if(bg_update_ptr->conn_type > SELECT_SMALL) {
+			char *conn_type = NULL;
+			switch(bg_update_ptr->conn_type) {
+			case SELECT_HTC_S:
+				conn_type = "s";
+				break;
+			case SELECT_HTC_D:
+				conn_type = "d";
+				break;
+			case SELECT_HTC_V:
+				conn_type = "v";
+				break;
+			case SELECT_HTC_L:
+				conn_type = "l";
+				break;
+			default:
+				break;
+			}
+			/* the option has to be set before the pool can be
+			   set */
+			if ((rc = bridge_modify_block(
+				     bg_record->bg_block_id,
+				     RM_MODIFY_Options,
+				     conn_type)) != STATUS_OK)
+				error("bridge_set_data(RM_MODIFY_Options)",
+				      bg_err_str(rc));
+		}
 #endif
 		if ((rc = bridge_modify_block(bg_record->bg_block_id,
 					      RM_MODIFY_MloaderImg, 
@@ -550,22 +598,22 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 		slurm_mutex_lock(&block_state_mutex);
 		bg_record->modifying = 0;		
 		slurm_mutex_unlock(&block_state_mutex);		
-	} else if(bg_update_ptr->reboot) 
-#ifdef HAVE_BGL
+	} else if(bg_update_ptr->reboot) {
+		slurm_mutex_lock(&block_state_mutex);
+		bg_record->modifying = 1;
+		slurm_mutex_unlock(&block_state_mutex);
+
 		bg_free_block(bg_record);
-#else
-		bg_reboot_block(bg_record);
-#endif
+
+		slurm_mutex_lock(&block_state_mutex);
+		bg_record->modifying = 0;		
+		slurm_mutex_unlock(&block_state_mutex);		
+	}
 
 	if(bg_record->state == RM_PARTITION_FREE) {
 		if((rc = boot_block(bg_record)) != SLURM_SUCCESS) {
 			slurm_mutex_lock(&block_state_mutex);
 			_reset_block(bg_record);
-			if (remove_from_bg_list(bg_job_block_list, bg_record)
-			    == SLURM_SUCCESS) {
-				num_unused_cpus += bg_record->bp_count
-					*bg_record->cpus_per_bp;
-			}
 			slurm_mutex_unlock(&block_state_mutex);
 			sleep(2);	
 			/* wait for the slurmd to begin 
@@ -626,13 +674,11 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 		slurm_mutex_lock(&block_state_mutex);
 		if (remove_from_bg_list(bg_job_block_list, bg_record)
 		    == SLURM_SUCCESS) {
-			num_unused_cpus += bg_record->bp_count
-				*bg_record->cpus_per_bp;
+			num_unused_cpus += bg_record->cpu_cnt;
 		}
 		slurm_mutex_unlock(&block_state_mutex);
 	}
 	slurm_mutex_unlock(&job_start_mutex);
-	
 }
 
 /* Perform job termination work */
@@ -732,8 +778,7 @@ static void _term_agent(bg_update_t *bg_update_ptr)
 #endif
 	
 	/* remove the block's users */
-	bg_record = 
-		find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
+	bg_record = find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
 	if(bg_record) {
 		debug("got the record %s user is %s",
 		      bg_record->bg_block_id,
@@ -813,6 +858,8 @@ static void *_block_agent(void *args)
 	if (agent_cnt == 0) {
 		list_destroy(bg_update_list);
 		bg_update_list = NULL;
+		pthread_cond_signal(&agent_cond);
+			
 	}
 	slurm_mutex_unlock(&agent_cnt_mutex);
 	return NULL;
@@ -998,7 +1045,12 @@ extern int start_job(struct job_record *job_ptr)
 				     SELECT_DATA_BLRTS_IMAGE, 
 				     bg_update_ptr->blrtsimage);
 	}
+#else
+	select_g_get_jobinfo(job_ptr->select_jobinfo,
+			     SELECT_DATA_CONN_TYPE, 
+			     &(bg_update_ptr->conn_type));
 #endif
+
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
 			     SELECT_DATA_LINUX_IMAGE, 
 			     &(bg_update_ptr->linuximage));
@@ -1030,16 +1082,14 @@ extern int start_job(struct job_record *job_ptr)
 		find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
 	if (bg_record) {
 		slurm_mutex_lock(&block_state_mutex);
-		job_ptr->num_procs = (bg_record->cpus_per_bp *
-				      bg_record->bp_count);
+		job_ptr->num_procs = bg_record->cpu_cnt;
 		bg_record->job_running = bg_update_ptr->job_ptr->job_id;
 		bg_record->job_ptr = bg_update_ptr->job_ptr;
-		if(!block_exist_in_list(bg_job_block_list, bg_record)) {
+		if(!block_ptr_exist_in_list(bg_job_block_list, bg_record)) {
 			list_push(bg_job_block_list, bg_record);
-			num_unused_cpus -= 
-				bg_record->bp_count*bg_record->cpus_per_bp;
+			num_unused_cpus -= bg_record->cpu_cnt;
 		}
-		if(!block_exist_in_list(bg_booted_block_list, bg_record))
+		if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record))
 			list_push(bg_booted_block_list, bg_record);
 		slurm_mutex_unlock(&block_state_mutex);
 	} else {
@@ -1210,7 +1260,7 @@ extern int boot_block(bg_record_t *bg_record)
 	if ((rc = bridge_set_block_owner(bg_record->bg_block_id, 
 					 bg_slurm_user_name)) 
 	    != STATUS_OK) {
-		error("bridge_set_part_owner(%s,%s): %s", 
+		error("bridge_set_block_owner(%s,%s): %s", 
 		      bg_record->bg_block_id, 
 		      bg_slurm_user_name,
 		      bg_err_str(rc));
@@ -1241,7 +1291,7 @@ extern int boot_block(bg_record_t *bg_record)
 	}
 	
 	slurm_mutex_lock(&block_state_mutex);
-	if(!block_exist_in_list(bg_booted_block_list, bg_record))
+	if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record))
 		list_push(bg_booted_block_list, bg_record);
 	slurm_mutex_unlock(&block_state_mutex);
 	
@@ -1267,7 +1317,7 @@ extern int boot_block(bg_record_t *bg_record)
 	slurm_mutex_unlock(&block_state_mutex);
 #else
 	slurm_mutex_lock(&block_state_mutex);
-	if(!block_exist_in_list(bg_booted_block_list, bg_record))
+	if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record))
 		list_push(bg_booted_block_list, bg_record);
 	bg_record->state = RM_PARTITION_READY;
 	last_bg_update = time(NULL);
@@ -1277,3 +1327,9 @@ extern int boot_block(bg_record_t *bg_record)
 
 	return SLURM_SUCCESS;
 }
+
+extern void waitfor_block_agents()
+{
+	if(agent_cnt)
+		pthread_cond_wait(&agent_cond, &agent_cnt_mutex);
+}
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.h b/src/plugins/select/bluegene/plugin/bg_job_run.h
index 9ea7ea949..694df91af 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.h
+++ b/src/plugins/select/bluegene/plugin/bg_job_run.h
@@ -86,4 +86,6 @@ extern int term_job(struct job_record *job_ptr);
  */
 extern int term_jobs_on_block(pm_partition_id_t bg_block_id);
 
+extern void waitfor_block_agents();
+
 #endif /* _BG_JOB_RUN_H_ */
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.c b/src/plugins/select/bluegene/plugin/bg_record_functions.c
index d7625b7d5..e46ff618a 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.c
+++ b/src/plugins/select/bluegene/plugin/bg_record_functions.c
@@ -41,6 +41,7 @@
 
 #include "src/common/uid.h"
 #include "src/slurmctld/trigger_mgr.h"
+#include "src/slurmctld/locks.h"
 
 /* some local functions */
 #ifdef HAVE_BG
@@ -62,7 +63,7 @@ extern void print_bg_record(bg_record_t* bg_record)
 	info("\tsize: %d BPs %u Nodes %d cpus", 
 	     bg_record->bp_count,
 	     bg_record->node_cnt,
-	     bg_record->cpus_per_bp * bg_record->bp_count);
+	     bg_record->cpu_cnt);
 	info("\tgeo: %ux%ux%u", bg_record->geo[X], bg_record->geo[Y], 
 	     bg_record->geo[Z]);
 	info("\tconn_type: %s", convert_conn_type(bg_record->conn_type));
@@ -111,25 +112,26 @@ extern void destroy_bg_record(void *object)
 	}
 }
 
+/* see if a record with like bitmaps already exists in a list */
 extern int block_exist_in_list(List my_list, bg_record_t *bg_record)
 {
 	ListIterator itr = list_iterator_create(my_list);
 	bg_record_t *found_record = NULL;
 	int rc = 0;
 
-	while ((found_record = (bg_record_t *) list_next(itr)) != NULL) {
+	while ((found_record = list_next(itr))) {
 		/* check for full node bitmap compare */
 		if(bit_equal(bg_record->bitmap, found_record->bitmap)
 		   && bit_equal(bg_record->ionode_bitmap,
 				found_record->ionode_bitmap)) {
 			if(bg_record->ionodes)
-				debug3("This block %s[%s] "
+				debug("This block %s[%s] "
 				       "is already in the list %s",
 				       bg_record->nodes,
 				       bg_record->ionodes,
 				       found_record->bg_block_id);
 			else
-				debug3("This block %s "
+				debug("This block %s "
 				       "is already in the list %s",
 				       bg_record->nodes,
 				       found_record->bg_block_id);
@@ -142,6 +144,23 @@ extern int block_exist_in_list(List my_list, bg_record_t *bg_record)
 	return rc;
 }
 
+/* see if the exact record already exists in a list */
+extern int block_ptr_exist_in_list(List my_list, bg_record_t *bg_record)
+{
+	ListIterator itr = list_iterator_create(my_list);
+	bg_record_t *found_record = NULL;
+	int rc = 0;
+
+	while ((found_record = list_next(itr))) {
+		if(bg_record == found_record) {
+			rc = 1;
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+	return rc;
+}
+
 extern void process_nodes(bg_record_t *bg_record, bool startup)
 {
 #ifdef HAVE_BG
@@ -162,7 +181,7 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 		}
 		memset(&best_start, 0, sizeof(best_start));
 		bg_record->bp_count = 0;
-		if((bg_record->conn_type == SELECT_SMALL) && (!startup))
+		if((bg_record->conn_type >= SELECT_SMALL) && (!startup))
 			error("We shouldn't be here there could be some "
 			      "badness if we use this logic %s",
 			      bg_record->nodes);
@@ -296,12 +315,19 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 	       alpha_num[bg_record->geo[Y]],
 	       alpha_num[bg_record->geo[Z]],
 	       bg_record->bp_count);
-
-	if ((bg_record->geo[X] == DIM_SIZE[X])
-	    && (bg_record->geo[Y] == DIM_SIZE[Y])
-	    && (bg_record->geo[Z] == DIM_SIZE[Z])) {
-		bg_record->full_block = 1;	
-	}	
+	/* This check is for sub midplane systems to figure out what
+	   the largest block can be.
+	*/
+	if((DIM_SIZE[X] > 1) || (DIM_SIZE[Y] > 1) || (DIM_SIZE[Z] > 1)) {
+		/* means we have more than 1 base partition */
+		if ((bg_record->geo[X] == DIM_SIZE[X])
+		    && (bg_record->geo[Y] == DIM_SIZE[Y])
+		    && (bg_record->geo[Z] == DIM_SIZE[Z])) {
+			bg_record->full_block = 1;	
+		}	
+	} else if(bg_record->node_cnt == bluegene_bp_node_cnt)
+		bg_record->full_block = 1;
+	
 	
 /* #ifndef HAVE_BG_FILES */
 /* 	max_dim[X] = MAX(max_dim[X], end[X]); */
@@ -420,28 +446,26 @@ extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record)
 	}
 	sec_record->job_running = fir_record->job_running;
 	sec_record->job_ptr = fir_record->job_ptr;
-	sec_record->cpus_per_bp = fir_record->cpus_per_bp;
+	sec_record->cpu_cnt = fir_record->cpu_cnt;
 	sec_record->node_cnt = fir_record->node_cnt;
-#ifdef HAVE_BGL
-	sec_record->quarter = fir_record->quarter;
-	sec_record->nodecard = fir_record->nodecard;
-#endif
 }
 
 /* 
  * Comparator used for sorting blocks smallest to largest
  * 
- * returns: -1: rec_a >rec_b   0: rec_a == rec_b   1: rec_a < rec_b
+ * returns: -1: rec_a > rec_b   0: rec_a == rec_b   1: rec_a < rec_b
  * 
  */
 extern int bg_record_cmpf_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 {
 	int size_a = rec_a->node_cnt;
 	int size_b = rec_b->node_cnt;
+
 	if (size_a < size_b)
 		return -1;
 	else if (size_a > size_b)
 		return 1;
+
 	if(rec_a->nodes && rec_b->nodes) {
 		size_a = strcmp(rec_a->nodes, rec_b->nodes);
 		if (size_a < 0)
@@ -449,22 +473,15 @@ extern int bg_record_cmpf_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 		else if (size_a > 0)
 			return 1;
 	}
-#ifdef HAVE_BGL
-	if (rec_a->quarter < rec_b->quarter)
-		return -1;
-	else if (rec_a->quarter > rec_b->quarter)
-		return 1;
 
-	if(rec_a->nodecard < rec_b->nodecard)
-		return -1;
-	else if(rec_a->nodecard > rec_b->nodecard)
-		return 1;
-#else
+	if(!rec_a->ionode_bitmap || !rec_b->ionode_bitmap)
+		return 0;
+
 	if(bit_ffs(rec_a->ionode_bitmap) < bit_ffs(rec_b->ionode_bitmap))
 		return -1;
 	else
 		return 1;
-#endif
+
 	return 0;
 }
 
@@ -473,29 +490,25 @@ extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id)
 	ListIterator itr;
 	bg_record_t *bg_record = NULL;
 		
+	xassert(my_list);
+
 	if(!bg_block_id)
 		return NULL;
 			
-	if(my_list) {
-		slurm_mutex_lock(&block_state_mutex);
-		itr = list_iterator_create(my_list);
-		while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
-			if(bg_record->bg_block_id)
-				if (!strcmp(bg_record->bg_block_id, 
-					    bg_block_id))
-					break;
-		}
-		list_iterator_destroy(itr);
-		slurm_mutex_unlock(&block_state_mutex);
-		if(bg_record)
-			return bg_record;
-		else
-			return NULL;
-	} else {
-		error("find_bg_record_in_list: no list");
-		return NULL;
+	slurm_mutex_lock(&block_state_mutex);
+	itr = list_iterator_create(my_list);
+	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
+		if(bg_record->bg_block_id)
+			if (!strcmp(bg_record->bg_block_id, 
+				    bg_block_id))
+				break;
 	}
-	
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&block_state_mutex);
+	if(bg_record)
+		return bg_record;
+	else
+		return NULL;
 }
 
 /* All changes to the bg_list target_name must 
@@ -573,13 +586,30 @@ extern void drain_as_needed(bg_record_t *bg_record, char *reason)
 	bool needed = true;
 	hostlist_t hl;
 	char *host = NULL;
-	char bg_down_node[128];
 
-	if(bg_record->job_running > NO_JOB_RUNNING)
-		slurm_fail_job(bg_record->job_running);			
+	if(bg_record->job_running > NO_JOB_RUNNING) {
+		int rc;
+		slurmctld_lock_t job_write_lock = {
+			NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
+		lock_slurmctld(job_write_lock);
+		debug2("Trying to requeue job %d", bg_record->job_running);
+		if((rc = job_requeue(0, bg_record->job_running, -1))) {
+			error("couldn't requeue job %u, failing it: %s",
+			      bg_record->job_running, 
+			      slurm_strerror(rc));
+			job_fail(bg_record->job_running);
+		}
+		unlock_slurmctld(job_write_lock);
+		slurm_mutex_lock(&block_state_mutex);
+		if(remove_from_bg_list(bg_job_block_list, bg_record) 
+		   == SLURM_SUCCESS) {
+			num_unused_cpus += bg_record->cpu_cnt;
+		}
+		slurm_mutex_unlock(&block_state_mutex);
+	}
 
 	/* small blocks */
-	if(bg_record->cpus_per_bp != procs_per_node) {
+	if(bg_record->cpu_cnt < procs_per_node) {
 		debug2("small block");
 		goto end_it;
 	}
@@ -591,7 +621,7 @@ extern void drain_as_needed(bg_record_t *bg_record, char *reason)
 		return;
 	}
 	while ((host = hostlist_shift(hl))) {
-		if (node_already_down(bg_down_node)) {
+		if (node_already_down(host)) {
 			needed = false;
 			free(host);
 			break;
@@ -610,59 +640,27 @@ end_it:
 		sleep(1);
 	}
 	
-	slurm_mutex_lock(&block_state_mutex);
-	error("Setting Block %s to ERROR state.", bg_record->bg_block_id);
-	bg_record->job_running = BLOCK_ERROR_STATE;
-	bg_record->state = RM_PARTITION_ERROR;
-	slurm_mutex_unlock(&block_state_mutex);
-	trigger_block_error();
+	put_block_in_error_state(bg_record, BLOCK_ERROR_STATE);
 	return;
 }
 
-#ifdef HAVE_BGL
-
-extern int set_ionodes(bg_record_t *bg_record)
+extern int set_ionodes(bg_record_t *bg_record, int io_start, int io_nodes)
 {
-	int i = 0;
-	int start_bit = 0;
-	int size = 0;
 	char bitstring[BITSIZE];
-	
-	if(!bg_record)
-		return SLURM_ERROR;
-	/* set the bitmap blank here if it is a full node we don't
-	   want anything set we also don't want the bg_record->ionodes set.
-	*/
-	bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets);
-	if(bg_record->quarter == (uint16_t)NO_VAL) {
-		return SLURM_SUCCESS;
-	}
 
-	start_bit = bluegene_quarter_ionode_cnt*bg_record->quarter;
-	
-	if(bg_record->nodecard != (uint16_t)NO_VAL
-	   && bluegene_nodecard_ionode_cnt) {
-		start_bit += bluegene_nodecard_ionode_cnt*bg_record->nodecard;
-		size = bluegene_nodecard_ionode_cnt;
-	} else
-		size = bluegene_quarter_ionode_cnt;
-	size += start_bit;
-
-	if(size == start_bit) {
-		error("start bit is the same as the end bit %d", size);
+	if(!bg_record)
 		return SLURM_ERROR;
-	}
-	for(i=start_bit; i<size; i++)
-		bit_set(bg_record->ionode_bitmap, i);
 	
+	bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets);
+	/* Set the correct ionodes being used in this block */
+	bit_nset(bg_record->ionode_bitmap, io_start, io_start+io_nodes);
 	bit_fmt(bitstring, BITSIZE, bg_record->ionode_bitmap);
 	bg_record->ionodes = xstrdup(bitstring);
-
 	return SLURM_SUCCESS;
 }
-#endif
 
-extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq)
+extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
+			 bool no_check, bitoff_t io_start)
 {
 	bg_record_t *bg_record = NULL;
 	ba_node_t *ba_node = NULL;
@@ -670,23 +668,15 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq)
 	uid_t pw_uid;
 	int i, len;
 	int small_count = 0;
-#ifdef HAVE_BGL
-	int node_cnt = 0;
-	uint16_t quarter = 0;
-	uint16_t nodecard = 0;
-	int small_size = 0;
-	bg_record_t *found_record = NULL;
-#endif
+
 	if(!records) {
 		fatal("add_bg_record: no records list given");
 	}
 	bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
 	
 	
-	bg_record->user_name = 
-		xstrdup(bg_slurm_user_name);
-	bg_record->target_name = 
-		xstrdup(bg_slurm_user_name);
+	bg_record->user_name = xstrdup(bg_slurm_user_name);
+	bg_record->target_name = xstrdup(bg_slurm_user_name);
 	
 	pw_uid = uid_from_string(bg_record->user_name);
 	if(pw_uid == (uid_t) -1) {
@@ -705,10 +695,8 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq)
 	/* bg_record->boot_state = 0; 	Implicit */
 	/* bg_record->state = 0;	Implicit */
 #ifdef HAVE_BGL
-	bg_record->quarter = (uint16_t)NO_VAL;
-	bg_record->nodecard = (uint16_t)NO_VAL;
 	debug2("asking for %s %d %d %s", 
-	       blockreq->block, blockreq->small128, blockreq->small32,
+	       blockreq->block, blockreq->small32, blockreq->small128,
 	       convert_conn_type(blockreq->conn_type));
 #else
 	debug2("asking for %s %d %d %d %d %d %s", 
@@ -746,7 +734,7 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq)
 	bg_record->node_use = SELECT_COPROCESSOR_MODE;
 #endif
 	bg_record->conn_type = blockreq->conn_type;
-	bg_record->cpus_per_bp = procs_per_node;
+	bg_record->cpu_cnt = procs_per_node * bg_record->bp_count;
 	bg_record->node_cnt = bluegene_bp_node_cnt * bg_record->bp_count;
 	bg_record->job_running = NO_JOB_RUNNING;
 
@@ -785,79 +773,8 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq)
 		}
 	} else {
 		debug("adding a small block");
-#ifdef HAVE_BGL // remove this clause when other works.  Only here to
-		// perserve old code 
-
-		/* if the ionode cnt for small32 is 0 then don't
-		   allow a nodecard allocation 
-		*/
-		if(!bluegene_nodecard_ionode_cnt) {
-			if(blockreq->small32) 
-				fatal("There is an error in your "
-				      "bluegene.conf file.\n"
-				      "Can't create a 32 node block with "
-				      "Numpsets=%u. (Try setting it to 64)",
-				      bluegene_numpsets);
-		}
-
-		if(blockreq->small32==0 && blockreq->small128==0) {
-			info("No specs given for this small block, "
-			     "I am spliting this block into 4 128CnBlocks");
-			blockreq->small128=4;
-		}		
-
-		i = (blockreq->small32*bluegene_nodecard_node_cnt) + 
-			(blockreq->small128*bluegene_quarter_node_cnt);
-		if(i != bluegene_bp_node_cnt)
-			fatal("There is an error in your bluegene.conf file.\n"
-			      "I am unable to request %d nodes consisting of "
-			      "%u 32CnBlocks and\n%u 128CnBlocks in one "
-			      "base partition with %u nodes.", 
-			      i, bluegene_bp_node_cnt, 
-			      blockreq->small32, blockreq->small128);
-		small_count = blockreq->small32+blockreq->small128; 
-		/* Automatically create 4-way split if 
-		 * conn_type == SELECT_SMALL in bluegene.conf
-		 * Here we go through each node listed and do the same thing
-		 * for each node.
-		 */
-		itr = list_iterator_create(bg_record->bg_block_list);
-		while ((ba_node = list_next(itr)) != NULL) {
-			/* break base partition up into 16 parts */
-			small_size = bluegene_bp_nodecard_cnt;
-			node_cnt = 0;
-			quarter = 0;
-			nodecard = 0;
-			for(i=0; i<small_count; i++) {
-				if(i == blockreq->small32) {
-					/* break base partition 
-					   up into 4 parts */
-					small_size = 4;
-				}
-									
-				if(small_size == 4)
-					nodecard = (uint16_t)NO_VAL;
-				else
-					nodecard = i%4; 
-				found_record = create_small_record(bg_record,
-								   quarter,
-								   nodecard);
-								 
-				/* this needs to be an append so we
-				   keep things in the order we got
-				   them, they will be sorted later */
-				list_append(records, found_record);
-				node_cnt += bluegene_bp_node_cnt/small_size;
-				if(node_cnt == 128) {
-					node_cnt = 0;
-					quarter++;
-				}
-			}
-		}
-		list_iterator_destroy(itr);
-		destroy_bg_record(bg_record);
-#else // remove this when testing.  Only here to perserve old code the
-      // code below is already for bgl
+		if(no_check)
+			goto no_check;
 		/* if the ionode cnt for small32 is 0 then don't
 		   allow a sub quarter allocation 
 		*/
@@ -934,6 +851,7 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq)
 			+ blockreq->small128
 			+ blockreq->small256; 
 #endif
+	no_check:
 		/* Automatically create 2-way split if 
 		 * conn_type == SELECT_SMALL in bluegene.conf
 		 * Here we go through each node listed and do the same thing
@@ -942,17 +860,15 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq)
 		itr = list_iterator_create(bg_record->bg_block_list);
 		while ((ba_node = list_next(itr)) != NULL) {
 			handle_small_record_request(records, blockreq,
-						    bg_record, 0);
+						    bg_record, io_start);
 		}
 		list_iterator_destroy(itr);
 		destroy_bg_record(bg_record);
-#endif // remove this when done testing
 	} 
 	
 	return SLURM_SUCCESS;
 }
 
-#ifndef HAVE_BGL
 extern int handle_small_record_request(List records, blockreq_t *blockreq,
 				       bg_record_t *bg_record, bitoff_t start)
 {
@@ -964,6 +880,9 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 	xassert(blockreq);
 	xassert(bg_record);
 
+	xassert(start >= 0);
+	xassert(start < bluegene_numpsets);
+
 #ifndef HAVE_BGL
 	for(i=0; i<blockreq->small16; i++) {
 		bit_nset(ionodes, start, start);
@@ -1036,7 +955,6 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 
 	return SLURM_SUCCESS;
 }
-#endif
 
 extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size)
 {
@@ -1050,6 +968,421 @@ extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size)
 	return SLURM_SUCCESS;
 }
 
+extern int down_nodecard(char *bp_name, bitoff_t io_start)
+{
+	List requests = NULL;
+	List delete_list = NULL;
+	ListIterator itr = NULL;
+	bg_record_t *bg_record = NULL, *found_record = NULL, tmp_record;
+	bg_record_t *smallest_bg_record = NULL;
+	struct node_record *node_ptr = NULL;
+	int bp_bit = 0;
+	static int io_cnt = NO_VAL;
+	static int create_size = NO_VAL;
+	static blockreq_t blockreq; 
+	int rc = SLURM_SUCCESS;
+
+	xassert(bp_name);
+
+	if(io_cnt == NO_VAL) {
+		io_cnt = 1;
+		/* Translate 1 nodecard count to ionode count */
+		if((io_cnt *= bluegene_io_ratio))
+			io_cnt--;
+		/* make sure we create something that is able to be
+		   created */
+		if(bluegene_smallest_block < bluegene_nodecard_node_cnt)
+			create_size = bluegene_nodecard_node_cnt;
+		else
+			create_size = bluegene_smallest_block;
+	}
+
+	node_ptr = find_node_record(bp_name);
+	if (!node_ptr) {
+		error ("down_sub_node_blocks: invalid node specified '%s'",
+		       bp_name);
+		return EINVAL;
+	}
+	bp_bit = (node_ptr - node_record_table_ptr);
+	
+	memset(&blockreq, 0, sizeof(blockreq_t));
+	
+	blockreq.conn_type = SELECT_SMALL;
+	blockreq.block = bp_name;
+
+	debug3("here setting %d of %d and %d-%d of %d",
+	       bp_bit, node_record_count, io_start, 
+	       io_start+io_cnt, bluegene_numpsets);
+
+	memset(&tmp_record, 0, sizeof(bg_record_t));
+	tmp_record.bp_count = 1;
+	tmp_record.node_cnt = bluegene_nodecard_node_cnt;
+	tmp_record.bitmap = bit_alloc(node_record_count);
+	bit_set(tmp_record.bitmap, bp_bit);
+
+	tmp_record.ionode_bitmap = bit_alloc(bluegene_numpsets);
+	bit_nset(tmp_record.ionode_bitmap, io_start, io_start+io_cnt);
+
+	slurm_mutex_lock(&block_state_mutex);
+	itr = list_iterator_create(bg_list);
+	while ((bg_record = list_next(itr))) {
+		if(!bit_test(bg_record->bitmap, bp_bit))
+			continue;
+		
+		if(!blocks_overlap(bg_record, &tmp_record)) 
+			continue;
+
+		if(bg_record->job_running > NO_JOB_RUNNING) 
+			slurm_fail_job(bg_record->job_running);
+
+		/* mark every one of these in an error state */
+		if(bluegene_layout_mode != LAYOUT_DYNAMIC) {
+			if(!delete_list)
+				delete_list = list_create(NULL);
+			list_append(delete_list, bg_record);
+			continue;
+		} 
+
+		/* below is only for dynamic modes since there are
+		   never overlapping blocks there */
+		/* if the block is smaller than the create size just
+		   continue on.
+		*/
+		if(bg_record->node_cnt < create_size)
+			continue;
+
+		if(!smallest_bg_record || 
+		   (smallest_bg_record->node_cnt > bg_record->node_cnt))
+			smallest_bg_record = bg_record;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&block_state_mutex);
+	
+	if(bluegene_layout_mode != LAYOUT_DYNAMIC) {
+		debug3("running non-dynamic mode");
+		if(delete_list) {
+			int cnt_set = 0;
+			/* don't lock here since it is handled inside
+			   the put_block_in_error_state
+			*/
+			itr = list_iterator_create(delete_list);
+			while ((bg_record = list_next(itr))) {
+				/* we already handled this */
+				if(bg_record->state == RM_PARTITION_ERROR) {
+					rc = SLURM_NO_CHANGE_IN_DATA;
+					continue;
+				}
+								
+				rc = put_block_in_error_state(
+					bg_record, BLOCK_ERROR_STATE);
+				cnt_set++;
+			}
+			if(cnt_set)
+				rc = SLURM_SUCCESS;
+			list_iterator_destroy(itr);
+			list_destroy(delete_list);
+			goto cleanup;
+		} 
+		
+		debug("didn't get a smallest block");
+		if(!node_already_down(bp_name)) {
+			time_t now = time(NULL);
+			char reason[128], time_str[32];
+			slurm_make_time_str(&now, time_str,
+					    sizeof(time_str));
+			snprintf(reason, sizeof(reason), 
+				 "select_bluegene: "
+				 "nodecard down [SLURM@%s]", 
+				 time_str); 
+			slurm_drain_nodes(bp_name, reason);
+		}
+		rc = SLURM_SUCCESS;
+		goto cleanup;
+	} 
+
+	
+	if(smallest_bg_record) {
+		debug2("smallest dynamic block is %s",
+		       smallest_bg_record->bg_block_id);
+		if(smallest_bg_record->state == RM_PARTITION_ERROR) {
+			rc = SLURM_NO_CHANGE_IN_DATA;
+			goto cleanup;
+		}
+		
+		while(smallest_bg_record->job_running > NO_JOB_RUNNING)
+			sleep(1);
+
+		if(smallest_bg_record->node_cnt == create_size) {
+			rc = put_block_in_error_state(
+				smallest_bg_record, BLOCK_ERROR_STATE);
+			goto cleanup;
+		} 
+
+		if(create_size > smallest_bg_record->node_cnt) {
+			/* we should never get here.  This means we
+			 * have a create_size that is bigger than a
+			 * block that is already made.
+			 */
+			rc = put_block_in_error_state(
+				smallest_bg_record, BLOCK_ERROR_STATE);
+			goto cleanup;
+		}
+		debug3("node count is %d", smallest_bg_record->node_cnt);
+		switch(smallest_bg_record->node_cnt) {
+#ifndef HAVE_BGL
+		case 64:
+			blockreq.small32 = 2;
+			break;
+		case 256:
+			blockreq.small32 = 8;
+			break;
+#endif
+		case 128:
+			blockreq.small32 = 4;			
+			break;
+		case 512:
+		default:
+			blockreq.small32 = 16;
+			break;
+		}
+
+		if(create_size != bluegene_nodecard_node_cnt) {
+			blockreq.small128 = blockreq.small32 / 4;
+			blockreq.small32 = 0;
+		}
+		/* set the start to be the same as the start of the
+		   ionode_bitmap.  If no ionodes set (not a small
+		   block) set io_start = 0. */
+		if((io_start = bit_ffs(smallest_bg_record->ionode_bitmap))
+		   == -1)
+			io_start = 0;
+	} else {
+		switch(create_size) {
+#ifndef HAVE_BGL
+		case 64:
+			blockreq.small64 = 8;
+			break;
+		case 256:
+			blockreq.small256 = 2;
+#endif
+		case 32:
+			blockreq.small32 = 16;
+			break;
+		case 128:
+			blockreq.small128 = 4;
+			break;
+		case 512:
+			if(!node_already_down(bp_name)) {
+				time_t now = time(NULL);
+				char reason[128], time_str[32];
+				slurm_make_time_str(&now, time_str,
+						    sizeof(time_str));
+				snprintf(reason, sizeof(reason), 
+					 "select_bluegene: "
+					 "nodecard down [SLURM@%s]", 
+					 time_str); 
+				slurm_drain_nodes(bp_name, reason);
+			}
+			rc = SLURM_SUCCESS;
+			goto cleanup;
+			break;
+		default:
+			error("Unknown create size of %d", create_size);
+			break;
+		}
+		/* since we don't have a block in this midplane
+		   we need to start at the beginning. */
+		io_start = 0;
+		/* we also need a bg_block to pretend to be the
+		   smallest block that takes up the entire midplane. */
+	}
+		
+	
+	/* Here we need to add blocks that take up nodecards on this
+	   midplane.  Since Slurm only keeps track of midplanes
+	   natively this is the only way to handle this case.
+	*/
+	requests = list_create(destroy_bg_record);
+	add_bg_record(requests, NULL, &blockreq, 1, io_start);
+		
+	delete_list = list_create(NULL);
+	while((bg_record = list_pop(requests))) {
+		slurm_mutex_lock(&block_state_mutex);
+		itr = list_iterator_create(bg_list);
+		while((found_record = list_next(itr))) {
+			if(!blocks_overlap(bg_record, found_record))
+				continue;
+			list_push(delete_list, found_record);
+			list_remove(itr);
+			num_block_to_free++;
+		}
+		list_iterator_destroy(itr);
+		slurm_mutex_unlock(&block_state_mutex);
+
+		/* we need to add this record since it doesn't exist */
+		if(configure_block(bg_record) == SLURM_ERROR) {
+			destroy_bg_record(bg_record);
+			error("down_sub_node_blocks: "
+			      "unable to configure block in api");
+			continue;
+		}
+
+		debug("adding block %s to fill in small blocks "
+		      "around bad nodecards",
+		      bg_record->bg_block_id);
+		print_bg_record(bg_record);
+		slurm_mutex_lock(&block_state_mutex);
+		list_append(bg_list, bg_record);
+		slurm_mutex_unlock(&block_state_mutex);
+		if(bit_overlap(bg_record->ionode_bitmap, 
+			       tmp_record.ionode_bitmap)) {
+			/* here we know the error block doesn't exist
+			   so just set the state here */
+			rc = put_block_in_error_state(
+				bg_record, BLOCK_ERROR_STATE);
+		}
+	}
+	list_destroy(requests);
+	
+	slurm_mutex_lock(&block_state_mutex);
+	free_block_list(delete_list);
+	list_destroy(delete_list);
+	sort_bg_record_inc_size(bg_list);
+	slurm_mutex_unlock(&block_state_mutex);
+	last_bg_update = time(NULL);	
+
+cleanup:
+	FREE_NULL_BITMAP(tmp_record.bitmap);
+	FREE_NULL_BITMAP(tmp_record.ionode_bitmap);
+
+	return rc;
+	
+}
+
+extern int up_nodecard(char *bp_name, bitstr_t *ionode_bitmap)
+{
+	ListIterator itr = NULL;
+	bg_record_t *bg_record = NULL;
+	struct node_record *node_ptr = NULL;
+	int bp_bit = 0;
+	int ret = 0;
+
+	xassert(bp_name);
+	xassert(ionode_bitmap);
+
+	node_ptr = find_node_record(bp_name);
+	if (!node_ptr) {
+		error ("up_nodecard: invalid node specified %s",
+		       bp_name);
+		return EINVAL;
+	}
+	bp_bit = (node_ptr - node_record_table_ptr);
+	
+	slurm_mutex_lock(&block_state_mutex);
+	itr = list_iterator_create(bg_list);
+	while((bg_record = list_next(itr))) {
+		if(bg_record->job_running != BLOCK_ERROR_STATE)
+			continue;
+		if(!bit_test(bg_record->bitmap, bp_bit))
+			continue;
+		
+		if(!bit_overlap(bg_record->ionode_bitmap, ionode_bitmap)) {
+			continue;
+		}
+		resume_block(bg_record);			
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&block_state_mutex);
+	
+	/* FIX ME: This needs to call the opposite of
+	   slurm_drain_nodes which does not yet exist.
+	*/
+	if((ret = node_already_down(bp_name))) {
+		/* means it was drained */
+		if(ret == 2) {
+			/* debug("node %s put back into service after " */
+/* 			      "being in an error state", */
+/* 			      bp_name); */
+		}
+	}
+
+	return SLURM_SUCCESS;
+}
+
+extern int put_block_in_error_state(bg_record_t *bg_record, int state)
+{
+	uid_t pw_uid;
+
+	xassert(bg_record);
+	
+	/* only check this if the blocks are created, meaning this
+	   isn't at startup.
+	*/
+	if(blocks_are_created) {
+		/* Since we are putting this block in an error state we need
+		   to wait for the job to be removed.  We don't really
+		   need to free the block though since we may just
+		   want it to be in an error state for some reason. */
+		while(bg_record->job_running > NO_JOB_RUNNING)
+			sleep(1);
+	}
+	
+	info("Setting Block %s to ERROR state.", bg_record->bg_block_id);
+	/* we add the block to these lists so we don't try to schedule
+	   on them. */
+	if(!block_ptr_exist_in_list(bg_job_block_list, bg_record)) {
+		list_push(bg_job_block_list, bg_record);
+		num_unused_cpus -= bg_record->cpu_cnt;
+	}
+	if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record)) 
+		list_push(bg_booted_block_list, bg_record);
+	
+	slurm_mutex_lock(&block_state_mutex);
+	bg_record->job_running = state;
+	bg_record->state = RM_PARTITION_ERROR;
+
+	xfree(bg_record->user_name);
+	xfree(bg_record->target_name);
+	bg_record->user_name = xstrdup(bg_slurm_user_name);
+	bg_record->target_name = xstrdup(bg_slurm_user_name);
+	
+	pw_uid = uid_from_string(bg_record->user_name);
+	if(pw_uid == (uid_t) -1) {
+		error("No such user: %s", bg_record->user_name);
+	} else {
+		bg_record->user_uid = pw_uid;
+	}
+	slurm_mutex_unlock(&block_state_mutex);
+
+	trigger_block_error();
+	last_bg_update = time(NULL);
+
+	return SLURM_SUCCESS;
+}
+
+/* block_state_mutex should be locked before calling */
+extern int resume_block(bg_record_t *bg_record)
+{
+	xassert(bg_record);
+
+	if(bg_record->job_running >= NO_JOB_RUNNING)
+		return SLURM_SUCCESS;
+
+	info("Block %s put back into service after "
+	     "being in an error state.",
+	      bg_record->bg_block_id);
+
+	if(remove_from_bg_list(bg_job_block_list, bg_record) == SLURM_SUCCESS) 
+		num_unused_cpus += bg_record->cpu_cnt;
+	remove_from_bg_list(bg_booted_block_list, bg_record);
+
+	bg_record->job_running = NO_JOB_RUNNING;
+	bg_record->state = RM_PARTITION_FREE;
+	last_bg_update = time(NULL);
+
+	return SLURM_SUCCESS;
+}
+
 /************************* local functions ***************************/
 
 #ifdef HAVE_BG
@@ -1125,6 +1458,8 @@ static int _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b)
 	      alpha_num[node_a->coord[Z]]); 
 	return 0;
 }
+
+
 #endif //HAVE_BG
 
 
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.h b/src/plugins/select/bluegene/plugin/bg_record_functions.h
index 18934395a..1727e24fa 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.h
+++ b/src/plugins/select/bluegene/plugin/bg_record_functions.h
@@ -98,13 +98,9 @@ typedef struct bg_record {
 	int job_running;                /* job id of job running of if
 					 * block is in an error state
 					 * BLOCK_ERROR_STATE */
-	int cpus_per_bp;                /* count of cpus per base part */
-	uint32_t node_cnt;              /* count of nodes per block */
+	uint32_t cpu_cnt;               /* count of cpus per block */
+	uint32_t node_cnt;              /* count of cnodes per block */
 #ifdef HAVE_BGL
-	uint16_t quarter;               /* used for small blocks 
-					   determine quarter of BP */
-	uint16_t nodecard;              /* used for small blocks 
-					   determine nodecard of quarter */
 	char *blrtsimage;              /* BlrtsImage for this block */
 #endif
 	char *linuximage;              /* LinuxImage/CnloadImage for
@@ -120,6 +116,7 @@ typedef struct bg_record {
 extern void print_bg_record(bg_record_t *record);
 extern void destroy_bg_record(void *object);
 extern int block_exist_in_list(List my_list, bg_record_t *bg_record);
+extern int block_ptr_exist_in_list(List my_list, bg_record_t *bg_record);
 extern void process_nodes(bg_record_t *bg_reord, bool startup);
 extern List copy_bg_list(List in_list);
 extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record);
@@ -134,14 +131,17 @@ extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id);
 extern int update_block_user(bg_record_t *bg_block_id, int set); 
 extern void drain_as_needed(bg_record_t *bg_record, char *reason);
 
-#ifdef HAVE_BGL
-extern int set_ionodes(bg_record_t *bg_record);
-#endif
+extern int set_ionodes(bg_record_t *bg_record, int io_start, int io_nodes);
 
-extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq);
+extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
+			 bool no_check, bitoff_t io_start);
 extern int handle_small_record_request(List records, blockreq_t *blockreq,
 				       bg_record_t *bg_record, bitoff_t start);
 
 extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size);
+extern int down_nodecard(char *bp_name, bitoff_t io_start);
+extern int up_nodecard(char *bp_name, bitstr_t *ionode_bitmap);
+extern int put_block_in_error_state(bg_record_t *bg_record, int state);
+extern int resume_block(bg_record_t *bg_record);
 
 #endif /* _BLUEGENE_BG_RECORD_FUNCTIONS_H_ */
diff --git a/src/plugins/select/bluegene/plugin/bg_switch_connections.c b/src/plugins/select/bluegene/plugin/bg_switch_connections.c
index 4cd5bab05..c8a382c28 100644
--- a/src/plugins/select/bluegene/plugin/bg_switch_connections.c
+++ b/src/plugins/select/bluegene/plugin/bg_switch_connections.c
@@ -2,7 +2,7 @@
  *  bg_switch_connections.c - Blue Gene switch management functions, 
  *  establish switch connections
  *
- *  $Id: bg_switch_connections.c 15919 2008-12-10 19:13:46Z da $
+ *  $Id: bg_switch_connections.c 17104 2009-04-01 17:20:31Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -127,7 +127,7 @@ static int _get_switches_by_bpid(
 
 	for (i=0; i<switch_num; i++) {
 		if(i) {
-			if ((rc = bridge_get_data(bg, RM_NextSwitch, 
+			if ((rc = bridge_get_data(my_bg, RM_NextSwitch, 
 						  &curr_switch)) 
 			    != STATUS_OK) {
 				fatal("bridge_get_data"
@@ -135,7 +135,7 @@ static int _get_switches_by_bpid(
 				      bg_err_str(rc));
 			}
 		} else {
-			if ((rc = bridge_get_data(bg, RM_FirstSwitch, 
+			if ((rc = bridge_get_data(my_bg, RM_FirstSwitch, 
 						  &curr_switch)) 
 			    != STATUS_OK) {
 				fatal("bridge_get_data"
@@ -170,31 +170,33 @@ static int _get_switches_by_bpid(
 static int _add_switch_conns(rm_switch_t* curr_switch, 
 			     ba_switch_t *ba_switch)
 {
-	ListIterator itr;
 	int firstconnect=1;
 
 	/* max number of connections in a switch */
 	int num_connections = 3;
 	ba_connection_t *ba_conn = NULL;
-	rm_connection_t conn;
+	rm_connection_t conn[num_connections];
+	rm_connection_t *conn_ptr = NULL;
 	int i, rc;
-	int conn_num=0;
 	int source = 0;
-	
-	for(i=0;i<num_connections;i++) {
+	List conn_list = list_create(NULL);
+	/* we have to figure out how many connections we have and then
+	   go through the loop again to actually add them */
+
+	for(i=0; i<num_connections; i++) {
 		/* set the source port(-) to check */
 		switch(i) {
 		case 0:
 			source = 1;
-			conn.p1 = RM_PORT_S1;
+			conn[i].p1 = RM_PORT_S1;
 			break;
 		case 1:
 			source = 2;
-			conn.p1 = RM_PORT_S2;
+			conn[i].p1 = RM_PORT_S2;
 			break;
 		case 2:
 			source = 4;
-			conn.p1 = RM_PORT_S4;
+			conn[i].p1 = RM_PORT_S4;
 			break;
 		default:
 			error("we are to far into the switch connections");
@@ -204,13 +206,13 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 		if(ba_conn->used && ba_conn->port_tar != source) {
 			switch(ba_conn->port_tar) {
 			case 0:
-				conn.p2 = RM_PORT_S0; 
+				conn[i].p2 = RM_PORT_S0; 
 				break;
 			case 3:
-				conn.p2 = RM_PORT_S3; 
+				conn[i].p2 = RM_PORT_S3; 
 				break;
 			case 5:
-				conn.p2 = RM_PORT_S5; 
+				conn[i].p2 = RM_PORT_S5; 
 				break;	
 			default:
 				error("we are trying to connection %d -> %d "
@@ -218,45 +220,15 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 				      source, ba_conn->port_tar);
 				break;	
 			}
-			conn.part_state = RM_PARTITION_READY;
-			
-			if(firstconnect) {
-				if ((rc = bridge_set_data(
-					     curr_switch, 
-					     RM_SwitchFirstConnection, 
-					     &conn)) 
-				    != STATUS_OK) {
-					list_iterator_destroy(itr);
-					
-					fatal("bridge_set_data"
-					      "(RM_SwitchFirstConnection): "
-					      "%s", 
-					      bg_err_str(rc));
-					return SLURM_ERROR;
-				}
-				firstconnect=0;
-			} else {
-				if ((rc = bridge_set_data(
-					     curr_switch, 
-					     RM_SwitchNextConnection,
-					     &conn)) 
-				    != STATUS_OK) {
-					list_iterator_destroy(itr);
-					
-					fatal("bridge_set_data"
-					      "(RM_SwitchNextConnection): %s",
-					      bg_err_str(rc));
-					return SLURM_ERROR;
-				}
-			} 
-			
-			conn_num++;
+			conn[i].part_state = RM_PARTITION_READY;
 			debug2("adding %d -> %d", source, ba_conn->port_tar);
+			list_push(conn_list, &conn[i]);
 		}
 	}
-	if(conn_num) {
-		if ((rc = bridge_set_data(curr_switch, RM_SwitchConnNum,
-					  &conn_num)) 
+	
+	i = list_count(conn_list);
+	if(i) {
+		if ((rc = bridge_set_data(curr_switch, RM_SwitchConnNum, &i))
 		    != STATUS_OK) {
 			fatal("bridge_set_data: RM_SwitchConnNum: %s",
 			      bg_err_str(rc));
@@ -265,9 +237,43 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 		} 
 	} else {
 		debug("we got a switch with no connections");
-		return SLURM_ERROR;
+		list_destroy(conn_list);
+		return SLURM_ERROR;
+	}
+
+	/* Now we can add them to the mix */
+	while((conn_ptr = list_pop(conn_list))) {
+		if(firstconnect) {
+			if ((rc = bridge_set_data(
+				     curr_switch, 
+				     RM_SwitchFirstConnection, 
+				     conn_ptr)) 
+			    != STATUS_OK) {
+				fatal("bridge_set_data"
+				      "(RM_SwitchFirstConnection): "
+				      "%s", 
+				      bg_err_str(rc));
+				list_destroy(conn_list);
+				return SLURM_ERROR;
+			}
+			firstconnect=0;
+		} else {
+			if ((rc = bridge_set_data(
+				     curr_switch, 
+				     RM_SwitchNextConnection,
+				     conn_ptr)) 
+			    != STATUS_OK) {
+				fatal("bridge_set_data"
+				      "(RM_SwitchNextConnection): %s",
+				      bg_err_str(rc));
+				list_destroy(conn_list);
+				return SLURM_ERROR;
+			}
+		} 		
 	}
 	
+	list_destroy(conn_list);
+
 	return SLURM_SUCCESS;
 }
 #endif
@@ -316,207 +322,21 @@ static int _used_switches(ba_node_t* ba_node)
 	return switch_count;
 }
 
-#ifdef HAVE_BGL
 extern int configure_small_block(bg_record_t *bg_record)
 {
 	int rc = SLURM_SUCCESS;
 #ifdef HAVE_BG_FILES	
 	bool small = true;
-	ListIterator itr;
-	ba_node_t* ba_node = NULL;
-	rm_BP_t *curr_bp = NULL;
-	rm_bp_id_t bp_id = NULL;
-	int num_ncards = 0;
-	rm_nodecard_t *ncard;
-	rm_nodecard_list_t *ncard_list = NULL;
-	rm_quarter_t quarter;
-	int num, i;
-#endif
-	if(bg_record->bp_count != 1) {
-		error("Requesting small block with %d bps, needs to be 1.",
-		      bg_record->bp_count);
-		return SLURM_ERROR;
-	}
-	
-#ifdef HAVE_BG_FILES	
-	/* set that we are doing a small block */
-	
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionSmall, 
-				  &small)) != STATUS_OK) {
-		
-		fatal("bridge_set_data(RM_PartitionPsetsPerBP)", 
-		      bg_err_str(rc));
-	}
-
-	num_ncards = bg_record->node_cnt/bluegene_nodecard_node_cnt;
-	if(num_ncards < 1)
-		num_ncards = 1;
-
-	if ((rc = bridge_set_data(bg_record->bg_block,
-				  RM_PartitionNodeCardNum,
-				  &num_ncards))
-	    != STATUS_OK) {
-		
-		fatal("bridge_set_data: RM_PartitionBPNum: %s", 
-		      bg_err_str(rc));
-	}
-	
-			
-	itr = list_iterator_create(bg_record->bg_block_list);
-	ba_node = list_next(itr);
-	list_iterator_destroy(itr);
-
-	if (_get_bp_by_location(bg, ba_node->coord, &curr_bp) 
-	    == SLURM_ERROR) {
-		fatal("_get_bp_by_location()");
-	}
-	
-	/* Set the one BP */
-	
-	if ((rc = bridge_set_data(bg_record->bg_block,
-				  RM_PartitionBPNum,
-				  &bg_record->bp_count)) 
-	    != STATUS_OK) {
-		
-		fatal("bridge_set_data: RM_PartitionBPNum: %s", 
-		      bg_err_str(rc));
-		return SLURM_ERROR;
-	}	
-	if ((rc = bridge_set_data(bg_record->bg_block,
-				  RM_PartitionFirstBP, 
-				  curr_bp)) 
-	    != STATUS_OK) {
-		
-		fatal("bridge_set_data("
-		      "BRIDGE_PartitionFirstBP): %s", 
-		      bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-	
-	
-	/* find the bp_id of the bp to get the small32 */
-	if ((rc = bridge_get_data(curr_bp, RM_BPID, &bp_id))
-	    != STATUS_OK) {
-		error("bridge_get_data(): %d", rc);
-		return SLURM_ERROR;
-	}
-
-	
-	if(!bp_id) {
-		error("No BP ID was returned from database");
-		return SLURM_ERROR;
-	}
-
-	if ((rc = bridge_get_nodecards(bp_id, &ncard_list))
-	    != STATUS_OK) {
-		error("bridge_get_nodecards(%s): %d",
-		      bp_id, rc);
-		free(bp_id);
-		return SLURM_ERROR;
-	}
-	free(bp_id);
-		
-			
-	if((rc = bridge_get_data(ncard_list, RM_NodeCardListSize, &num))
-	   != STATUS_OK) {
-		error("bridge_get_data(RM_NodeCardListSize): %s",
-		      bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-	num_ncards = 0;
-	for(i=0; i<num; i++) {
-		if (i) {
-			if ((rc = bridge_get_data(ncard_list, 
-						  RM_NodeCardListNext, 
-						  &ncard)) != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_NodeCardListNext): %s",
-				      rc);
-				rc = SLURM_ERROR;
-				goto cleanup;
-			}
-		} else {
-			if ((rc = bridge_get_data(ncard_list, 
-						  RM_NodeCardListFirst, 
-						  &ncard)) != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_NodeCardListFirst): %s",
-				      rc);
-				rc = SLURM_ERROR;
-				goto cleanup;
-			}
-		}
-		
-		if ((rc = bridge_get_data(ncard, 
-					  RM_NodeCardQuarter, 
-					  &quarter)) != STATUS_OK) {
-			error("bridge_get_data(RM_NodeCardQuarter): %d",rc);
-			rc = SLURM_ERROR;
-			goto cleanup;
-		}
-		if(bg_record->quarter != quarter)
-			continue;
-		if(bg_record->nodecard != (uint16_t) NO_VAL) {
-			if(bg_record->nodecard != (i%4))
-				continue;
-		}
-
-		
-		if (num_ncards) {
-			if ((rc = bridge_set_data(bg_record->bg_block,
-						  RM_PartitionNextNodeCard, 
-						  ncard)) 
-			    != STATUS_OK) {
-				
-				fatal("bridge_set_data("
-				      "RM_PartitionNextNodeCard): %s", 
-				      bg_err_str(rc));
-			}
-		} else {
-			if ((rc = bridge_set_data(bg_record->bg_block,
-						  RM_PartitionFirstNodeCard, 
-						  ncard)) 
-			    != STATUS_OK) {
-				
-				fatal("bridge_set_data("
-				      "RM_PartitionFirstNodeCard): %s", 
-				      bg_err_str(rc));
-			}
-		}
-		
-		num_ncards++;
-		if(num_ncards == 4)
-			break;
-	}
-cleanup:
-	if ((rc = bridge_free_nodecard_list(ncard_list)) != STATUS_OK) {
-		error("bridge_free_nodecard_list(): %s", bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-#endif
-	debug2("making the small block");
-	return rc;
-}
-
-#else
-
-extern int configure_small_block(bg_record_t *bg_record)
-{
-	int rc = SLURM_SUCCESS;
-#ifdef HAVE_BG_FILES	
-	bool small = true;
-	ListIterator itr;
 	ba_node_t* ba_node = NULL;
 	rm_BP_t *curr_bp = NULL;
 	rm_bp_id_t bp_id = NULL;
+#ifndef HAVE_BGL
 	rm_nodecard_id_t nc_char = NULL;
+#endif
 	int nc_id = 0;
-	int num_ncards = 0, sub_nodecard = 0, ionode_card = 0;
+	int num_ncards = 0, sub_nodecard = 0, ionode_card = 0, nc_count = 0;
 	rm_nodecard_t *ncard;
 	rm_nodecard_list_t *ncard_list = NULL;
-#ifdef HAVE_BGL
-	rm_quarter_t quarter;
-#endif
 	int num, i;
 	int use_nc[bluegene_bp_nodecard_cnt];
 	double nc_pos = 0;
@@ -527,7 +347,8 @@ extern int configure_small_block(bg_record_t *bg_record)
 		      bg_record->bp_count);
 		return SLURM_ERROR;
 	}
-	
+/* 	info("configuring small block on ionodes %s out of %d ncs",  */
+/* 	     bg_record->ionodes, bluegene_bp_nodecard_cnt); */
 #ifdef HAVE_BG_FILES	
 	/* set that we are doing a small block */
 	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionSmall, 
@@ -570,10 +391,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 		      bg_err_str(rc));
 	}
 	
-			
-	itr = list_iterator_create(bg_record->bg_block_list);
-	ba_node = list_next(itr);
-	list_iterator_destroy(itr);
+	ba_node = list_peek(bg_record->bg_block_list);
 
 	if (_get_bp_by_location(bg, ba_node->coord, &curr_bp) 
 	    == SLURM_ERROR) {
@@ -632,7 +450,12 @@ extern int configure_small_block(bg_record_t *bg_record)
 		      bg_err_str(rc));
 		return SLURM_ERROR;
 	}
-	num_ncards = 0;
+	if(num_ncards > num) {
+		error("You requested more (%d > %d) nodecards "
+		      "than are available on this block %s",
+		      num_ncards, num, bg_record->nodes);
+	}
+
 	for(i=0; i<num; i++) {
 		if (i) {
 			if ((rc = bridge_get_data(ncard_list, 
@@ -656,6 +479,15 @@ extern int configure_small_block(bg_record_t *bg_record)
 			}
 		}
 		
+#ifdef HAVE_BGL
+		/* on BG/L we assume the order never changes when the
+		   system is up.  This could change when a reboot of
+		   the system happens, but that should be rare.
+		*/
+		nc_id = i;
+		if(!use_nc[i]) 
+			continue;
+#else
 		if ((rc = bridge_get_data(ncard, 
 					  RM_NodeCardID, 
 					  &nc_char)) != STATUS_OK) {
@@ -667,7 +499,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 		if(!nc_char) {
 			error("No NodeCard ID was returned from database");
 			rc = SLURM_ERROR;
-			goto cleanup
+			goto cleanup;
 		}
 
 		nc_id = atoi((char*)nc_char+1);
@@ -750,9 +582,9 @@ extern int configure_small_block(bg_record_t *bg_record)
 			}			
 		}
 		free(nc_char);
+#endif
 
-
-		if (num_ncards) {
+		if (nc_count) {
 			if ((rc = bridge_set_data(bg_record->bg_block,
 						  RM_PartitionNextNodeCard, 
 						  ncard)) 
@@ -778,8 +610,8 @@ extern int configure_small_block(bg_record_t *bg_record)
 			}
 		}
 		
-		num_ncards++;
-
+		nc_count++;
+#ifndef HAVE_BGL
 		if(sub_nodecard) {
 			if((rc = bridge_free_nodecard(ncard)) != STATUS_OK) {
 				error("bridge_free_nodecard(): %s", 
@@ -788,6 +620,9 @@ extern int configure_small_block(bg_record_t *bg_record)
 				goto cleanup;
 			}
 		}
+#endif
+		if(nc_count == num_ncards)
+			break;
 	}
 cleanup:
 	if ((rc = bridge_free_nodecard_list(ncard_list)) != STATUS_OK) {
@@ -798,7 +633,6 @@ cleanup:
 	debug2("making the small block");
 	return rc;
 }
-#endif
 
 /**
  * connect the given switch up with the given connections
@@ -825,7 +659,7 @@ extern int configure_block_switches(bg_record_t * bg_record)
 	bg_record->bp_count = 0;
 	
 	itr = list_iterator_create(bg_record->bg_block_list);
-	while ((ba_node = (ba_node_t *) list_next(itr)) != NULL) {
+	while ((ba_node = list_next(itr))) {
 		if(ba_node->used) {
 			bg_record->bp_count++;
 		}
@@ -857,7 +691,7 @@ extern int configure_block_switches(bg_record_t * bg_record)
 	debug3("switch count %d", bg_record->switch_count);
 
 	list_iterator_reset(itr);
-	while ((ba_node = (ba_node_t *) list_next(itr)) != NULL) {
+	while ((ba_node = list_next(itr))) {
 #ifdef HAVE_BG_FILES
 		if (_get_bp_by_location(bg, ba_node->coord, &curr_bp) 
 		    == SLURM_ERROR) {
diff --git a/src/plugins/select/bluegene/plugin/block_sys.c b/src/plugins/select/bluegene/plugin/block_sys.c
index f2d7c156c..6abd01ef7 100755
--- a/src/plugins/select/bluegene/plugin/block_sys.c
+++ b/src/plugins/select/bluegene/plugin/block_sys.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  block_sys.c - component used for wiring up the blocks
  *
- *  $Id: block_sys.c 16146 2009-01-06 18:20:48Z da $
+ *  $Id: block_sys.c 17162 2009-04-06 20:18:23Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -119,6 +119,9 @@ static void _pre_allocate(bg_record_t *bg_record)
 		error("bridge_set_data(RM_PartitionRamdiskImg)", 
 		      bg_err_str(rc));
 #else
+	struct tm my_tm;
+	struct timeval my_tv;
+
 	if ((rc = bridge_set_data(bg_record->bg_block,
 				  RM_PartitionCnloadImg,
 				  bg_record->linuximage)) != STATUS_OK) 
@@ -130,6 +133,15 @@ static void _pre_allocate(bg_record_t *bg_record)
 		error("bridge_set_data(RM_PartitionIoloadImg)", 
 		      bg_err_str(rc));
 
+	gettimeofday(&my_tv, NULL);
+	localtime_r(&my_tv.tv_sec, &my_tm);
+	bg_record->bg_block_id = xstrdup_printf(
+		"RMP%2.2d%2.2s%2.2d%2.2d%2.2d%3.3d",
+		my_tm.tm_mday, mon_abbr(my_tm.tm_mon), 
+		my_tm.tm_hour, my_tm.tm_min, my_tm.tm_sec, my_tv.tv_usec/1000);
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionID,
+				  bg_record->bg_block_id)) != STATUS_OK)
+		error("bridge_set_data(RM_PartitionID)", bg_err_str(rc));
 #endif
 	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionMloaderImg, 
 				  bg_record->mloaderimage)) != STATUS_OK)
@@ -155,11 +167,6 @@ static void _pre_allocate(bg_record_t *bg_record)
 	    != STATUS_OK)
 		error("bridge_set_data(RM_PartitionUserName)", bg_err_str(rc));
 	
-/* 	info("setting it here"); */
-/* 	bg_record->bg_block_id = "RMP101"; */
-/* 	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionID,  */
-/* 			&bg_record->bg_block_id)) != STATUS_OK) */
-/* 		error("bridge_set_data(RM_PartitionID)", bg_err_str(rc)); */
 #endif
 }
 
@@ -262,8 +269,7 @@ static int _post_allocate(bg_record_t *bg_record)
 
 #ifdef HAVE_BG_FILES
 #ifdef HAVE_BGL
-static int _find_nodecard(bg_record_t *bg_record, 
-			  rm_partition_t *block_ptr)
+static int _find_nodecard(rm_partition_t *block_ptr, int *nc_id)
 {
 	char *my_card_name = NULL;
 	char *card_name = NULL;
@@ -275,6 +281,9 @@ static int _find_nodecard(bg_record_t *bg_record,
 	rm_nodecard_t *ncard = NULL;
 	rm_BP_t *curr_bp = NULL;
 	
+	xassert(block_ptr);
+	xassert(nc_id);
+
 	if((rc = bridge_get_data(block_ptr,
 				 RM_PartitionFirstNodeCard,
 				 &ncard))
@@ -349,12 +358,12 @@ static int _find_nodecard(bg_record_t *bg_record,
 			rc = SLURM_ERROR;
 			goto cleanup;
 		}
-		if(strcmp(my_card_name,card_name)) {
+		if(strcmp(my_card_name, card_name)) {
 			free(card_name);
 			continue;
 		}
 		free(card_name);
-		bg_record->nodecard = (i%4);
+		(*nc_id) = i;
 		break;
 	}
 cleanup:
@@ -372,7 +381,7 @@ extern int configure_block(bg_record_t *bg_record)
 #endif
 	_pre_allocate(bg_record);
 
-	if(bg_record->cpus_per_bp < procs_per_node)
+	if(bg_record->cpu_cnt < procs_per_node)
 		configure_small_block(bg_record);
 	else
 		configure_block_switches(bg_record);
@@ -389,7 +398,7 @@ int read_bg_blocks()
 {
 	int rc = SLURM_SUCCESS;
 
-	int bp_cnt, i;
+	int bp_cnt, i, nc_cnt, io_cnt;
 	rm_element_t *bp_ptr = NULL;
 	rm_bp_id_t bpid;
 	rm_partition_t *block_ptr = NULL;
@@ -404,11 +413,8 @@ int read_bg_blocks()
 	rm_partition_list_t *block_list = NULL;
 	rm_partition_state_flag_t state = PARTITION_ALL_FLAG;
 	rm_nodecard_t *ncard = NULL;
-#ifdef HAVE_BGL
-	rm_quarter_t quarter;
-#else
 	int nc_id, io_start;
-#endif
+
 	bool small = false;
 	hostlist_t hostlist;		/* expanded form of hosts */
 
@@ -465,6 +471,7 @@ int read_bg_blocks()
 			free(tmp_char);
 			continue;
 		}
+
 		if(bg_recover) {
 			if ((rc = bridge_get_block(tmp_char, &block_ptr))
 			    != STATUS_OK) {
@@ -484,51 +491,45 @@ int read_bg_blocks()
 		free(tmp_char);
 
 		bg_record->state = NO_VAL;
-#ifdef HAVE_BGL
-		bg_record->quarter = (uint16_t) NO_VAL;
-		bg_record->nodecard = (uint16_t) NO_VAL;
-#endif
-		bg_record->job_running = NO_JOB_RUNNING;
-		
+#ifndef HAVE_BGL
 		if ((rc = bridge_get_data(block_ptr, 
-					  RM_PartitionBPNum, 
+					  RM_PartitionSize, 
 					  &bp_cnt)) 
 		    != STATUS_OK) {
-			error("bridge_get_data(RM_BPNum): %s", 
+			error("bridge_get_data(RM_PartitionSize): %s", 
 			      bg_err_str(rc));
-			bp_cnt = 0;
+			goto clean_up;
 		}
 				
 		if(bp_cnt==0)
 			goto clean_up;
-		bg_record->bp_count = bp_cnt;
 
-#ifndef HAVE_BGL
+		bg_record->node_cnt = bp_cnt;
+		bg_record->cpu_cnt = bluegene_proc_ratio * bg_record->node_cnt;
+#endif
+		bg_record->job_running = NO_JOB_RUNNING;
+		
 		if ((rc = bridge_get_data(block_ptr, 
-					  RM_PartitionSize, 
+					  RM_PartitionBPNum, 
 					  &bp_cnt)) 
 		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionSize): %s", 
+			error("bridge_get_data(RM_BPNum): %s", 
 			      bg_err_str(rc));
-			bp_cnt = 0;
+			goto clean_up;
 		}
 				
 		if(bp_cnt==0)
 			goto clean_up;
+		bg_record->bp_count = bp_cnt;
 
-		bg_record->node_cnt = bp_cnt;
-		bg_record->cpus_per_bp = 
-			bluegene_proc_ratio * bg_record->node_cnt;
-
-#endif
-		debug3("has %d BPs",
-		       bg_record->bp_count);
+		debug3("has %d BPs", bg_record->bp_count);
 		
 		if ((rc = bridge_get_data(block_ptr, RM_PartitionSwitchNum,
 					  &bg_record->switch_count)) 
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionSwitchNum): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		} 
 
 		if ((rc = bridge_get_data(block_ptr, RM_PartitionSmall, 
@@ -536,10 +537,40 @@ int read_bg_blocks()
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionSmall): %s",
 			      bg_err_str(rc));
-			bp_cnt = 0;
+			goto clean_up;
 		}
-
+		
 		if(small) {
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionOptions,
+						  &tmp_char))
+			    != STATUS_OK) {
+				error("bridge_get_data(RM_PartitionOptions): "
+				      "%s", bg_err_str(rc));
+				goto clean_up;
+			} else if(tmp_char) {
+				switch(tmp_char[0]) {
+				case 's':
+					bg_record->conn_type = SELECT_HTC_S;
+					break;
+				case 'd':
+					bg_record->conn_type = SELECT_HTC_D;
+					break;
+				case 'v':
+					bg_record->conn_type = SELECT_HTC_V;
+					break;
+				case 'l':
+					bg_record->conn_type = SELECT_HTC_L;
+					break;
+				default:
+					bg_record->conn_type = SELECT_SMALL;
+					break;
+				}
+				
+				free(tmp_char);
+			} else
+				bg_record->conn_type = SELECT_SMALL;
+
 			if((rc = bridge_get_data(block_ptr,
 						 RM_PartitionFirstNodeCard,
 						 &ncard))
@@ -547,56 +578,53 @@ int read_bg_blocks()
 				error("bridge_get_data("
 				      "RM_PartitionFirstNodeCard): %s",
 				      bg_err_str(rc));
-				bp_cnt = 0;
+				goto clean_up;
 			}
 			
-			bg_record->conn_type = SELECT_SMALL;
 			if((rc = bridge_get_data(block_ptr,
 						 RM_PartitionNodeCardNum,
-						 &i))
+						 &nc_cnt))
 			   != STATUS_OK) {
 				error("bridge_get_data("
 				      "RM_PartitionNodeCardNum): %s",
 				      bg_err_str(rc));
-				bp_cnt = 0;
+				goto clean_up;
 			}
 #ifdef HAVE_BGL
-			if(i == 1) {
-				_find_nodecard(bg_record, block_ptr);
-				i = bluegene_bp_nodecard_cnt;
-			} 
+			/* Translate nodecard count to ionode count */
+			if((io_cnt = nc_cnt * bluegene_io_ratio))
+				io_cnt--;
+
+			nc_id = 0;
+			if(nc_cnt == 1) 
+				_find_nodecard(block_ptr, &nc_id);
+			
+			bg_record->node_cnt = 
+				nc_cnt * bluegene_nodecard_node_cnt;
+			bg_record->cpu_cnt =
+				bluegene_proc_ratio * bg_record->node_cnt;
+
 			if ((rc = bridge_get_data(ncard, 
 						  RM_NodeCardQuarter, 
-						  &quarter)) != STATUS_OK) {
+						  &io_start)) != STATUS_OK) {
 				error("bridge_get_data(CardQuarter): %d",rc);
-				bp_cnt = 0;
+				goto clean_up;
 			}
-			bg_record->quarter = quarter;
-
-
-			debug3("%s is in quarter %d nodecard %d",
-			       bg_record->bg_block_id,
-			       bg_record->quarter,
-			       bg_record->nodecard);
-			bg_record->cpus_per_bp = procs_per_node/i;
-			bg_record->node_cnt = bluegene_bp_node_cnt/i;
-			if(set_ionodes(bg_record) == SLURM_ERROR) 
-				error("couldn't create ionode_bitmap "
-				      "for %d.%d",
-				      bg_record->quarter, bg_record->nodecard);
+			io_start *= bluegene_quarter_ionode_cnt;
+			io_start += bluegene_nodecard_ionode_cnt * (nc_id%4);
 #else
 			/* Translate nodecard count to ionode count */
-			if((i *= bluegene_io_ratio))
-				i--;
+			if((io_cnt = nc_cnt * bluegene_io_ratio))
+				io_cnt--;
 
 			if ((rc = bridge_get_data(ncard, 
 						  RM_NodeCardID, 
 						  &tmp_char)) != STATUS_OK) {
 				error("bridge_get_data(RM_NodeCardID): %d",rc);
-				bp_cnt = 0;
+				goto clean_up;
 			}
 			
-			if(bp_cnt==0)
+			if(!tmp_char)
 				goto clean_up;
 			
 			/* From the first nodecard id we can figure
@@ -605,15 +633,54 @@ int read_bg_blocks()
 			nc_id = atoi((char*)tmp_char+1);
 			free(tmp_char);
 			io_start = nc_id * bluegene_io_ratio;
-			bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets);
-			/* Set the correct ionodes being used in this
-			   block */
-			bit_nset(bg_record->ionode_bitmap,
-				 io_start, io_start+i);
+			if(bg_record->node_cnt < bluegene_nodecard_node_cnt) {
+				rm_ionode_t *ionode;
+
+				/* figure out the ionode we are using */
+				if ((rc = bridge_get_data(
+					     ncard, 
+					     RM_NodeCardFirstIONode, 
+					     &ionode)) != STATUS_OK) {
+					error("bridge_get_data("
+					      "RM_NodeCardFirstIONode): %d",
+					      rc);
+					goto clean_up;
+				}
+				if ((rc = bridge_get_data(ionode,
+							  RM_IONodeID, 
+							  &tmp_char)) 
+				    != STATUS_OK) {				
+					error("bridge_get_data("
+					      "RM_NodeCardIONodeNum): %s", 
+					      bg_err_str(rc));
+					rc = SLURM_ERROR;
+					goto clean_up;
+				}			
+				
+				if(!tmp_char)
+					goto clean_up;
+				/* just add the ionode num to the
+				 * io_start */
+				io_start += atoi((char*)tmp_char+1);
+				free(tmp_char);
+				/* make sure i is 0 since we are only using
+				 * 1 ionode */
+				io_cnt = 0;
+			}
 #endif
+
+			if(set_ionodes(bg_record, io_start, io_cnt)
+			   == SLURM_ERROR)
+				error("couldn't create ionode_bitmap "
+				      "for ionodes %d to %d",
+				      io_start, io_start+io_cnt);
+			debug3("%s uses ionodes %s",
+			       bg_record->bg_block_id,
+			       bg_record->ionodes);
 		} else {
 #ifdef HAVE_BGL
-			bg_record->cpus_per_bp = procs_per_node;
+			bg_record->cpu_cnt = procs_per_node 
+				* bg_record->bp_count;
 			bg_record->node_cnt =  bluegene_bp_node_cnt
 				* bg_record->bp_count;
 #endif
@@ -624,14 +691,14 @@ int read_bg_blocks()
 				error("bridge_get_data"
 				      "(RM_PartitionConnection): %s",
 				      bg_err_str(rc));
+				goto clean_up;
 			}
 			/* Set the bitmap blank here if it is a full
 			   node we don't want anything set we also
 			   don't want the bg_record->ionodes set.
 			*/
 			bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets);
-		}
-		
+		}		
 		
 		bg_record->bg_block_list =
 			get_and_set_block_wiring(bg_record->bg_block_id);
@@ -640,11 +707,6 @@ int read_bg_blocks()
 			      bg_record->bg_block_id);
 		hostlist = hostlist_create(NULL);
 
-		/* this needs to be changed for small blocks,
-		   we just don't know what they are suppose to look 
-		   like just yet. 
-		*/
-
 		for (i=0; i<bp_cnt; i++) {
 			if(i) {
 				if ((rc = bridge_get_data(block_ptr, 
@@ -726,6 +788,7 @@ int read_bg_blocks()
 					  &bg_record->state)) != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionState): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		} else if(bg_record->state == RM_PARTITION_CONFIGURING)
 			bg_record->boot_state = 1;
 		else
@@ -736,7 +799,10 @@ int read_bg_blocks()
 		       bg_record->state);
 		
 		process_nodes(bg_record, false);
-	
+
+		/* We can stop processing information now since we
+		   don't need to rest of the information to decide if
+		   this is the correct block. */
 		if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
 			bg_record_t *tmp_record = xmalloc(sizeof(bg_record_t));
 			copy_bg_record(bg_record, tmp_record);
@@ -747,6 +813,7 @@ int read_bg_blocks()
 					  &bp_cnt)) != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionUsersNum): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		} else {
 			if(bp_cnt==0) {
 				
@@ -765,6 +832,7 @@ int read_bg_blocks()
 					error("bridge_get_data"
 					      "(RM_PartitionFirstUser): %s",
 					      bg_err_str(rc));
+					goto clean_up;
 				}
 				if(!user_name) {
 					error("No user name was "
@@ -802,6 +870,7 @@ int read_bg_blocks()
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionBlrtsImg): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		}
 		if(!user_name) {
 			error("No BlrtsImg was returned from database");
@@ -815,6 +884,7 @@ int read_bg_blocks()
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionLinuxImg): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		}
 		if(!user_name) {
 			error("No LinuxImg was returned from database");
@@ -828,6 +898,7 @@ int read_bg_blocks()
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionRamdiskImg): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		}
 		if(!user_name) {
 			error("No RamdiskImg was returned from database");
@@ -842,6 +913,7 @@ int read_bg_blocks()
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionCnloadImg): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		}
 		if(!user_name) {
 			error("No CnloadImg was returned from database");
@@ -855,6 +927,7 @@ int read_bg_blocks()
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionIoloadImg): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		}
 		if(!user_name) {
 			error("No IoloadImg was returned from database");
@@ -869,6 +942,7 @@ int read_bg_blocks()
 		    != STATUS_OK) {
 			error("bridge_get_data(RM_PartitionMloaderImg): %s",
 			      bg_err_str(rc));
+			goto clean_up;
 		}
 		if(!user_name) {
 			error("No MloaderImg was returned from database");
@@ -878,9 +952,8 @@ int read_bg_blocks()
 
 					
 	clean_up:	
-		if (bg_recover
-		    &&  ((rc = bridge_free_block(block_ptr)) 
-			 != STATUS_OK)) {
+		if (bg_recover 
+		    && ((rc = bridge_free_block(block_ptr)) != STATUS_OK)) {
 			error("bridge_free_block(): %s", bg_err_str(rc));
 		}
 	}
@@ -889,7 +962,7 @@ int read_bg_blocks()
 	return rc;
 }
 
-#else
+#endif
 
 extern int load_state_file(char *dir_name)
 {
@@ -912,6 +985,10 @@ extern int load_state_file(char *dir_name)
 	uid_t my_uid;
 	int ionodes = 0;
 	char *name = NULL;
+	struct part_record *part_ptr = NULL;
+	char *non_usable_nodes = NULL;
+	bitstr_t *bitmap = NULL;
+	ListIterator itr = NULL;
 
 	if(!dir_name) {
 		debug2("Starting bluegene with clean slate");
@@ -978,9 +1055,50 @@ extern int load_state_file(char *dir_name)
 		error("select_p_state_restore: problem unpacking node_info");
 		goto unpack_error;
 	}
+
+#ifdef HAVE_BG_FILES
+	for (i=0; i<node_select_ptr->record_count; i++) {
+		bg_info_record = &(node_select_ptr->bg_info_array[i]);
+		
+		/* we only care about the states we need here
+		 * everthing else should have been set up already */
+		if(bg_info_record->state == RM_PARTITION_ERROR) {
+			if((bg_record = find_bg_record_in_list(
+				    bg_curr_block_list,
+				    bg_info_record->bg_block_id)))
+				/* put_block_in_error_state should be
+				   called after the bg_list has been
+				   made.  We can't call it here since
+				   this record isn't the record kept
+				   around in bg_list.
+				*/
+				bg_record->state = bg_info_record->state;
+		}
+	}
+
+	select_g_free_node_info(&node_select_ptr);
+	free_buf(buffer);
+	return SLURM_SUCCESS;
+#endif
+
 	slurm_mutex_lock(&block_state_mutex);
 	reset_ba_system(false);
 
+	/* Locks are already in place to protect part_list here */
+	bitmap = bit_alloc(node_record_count);
+	itr = list_iterator_create(part_list);
+	while ((part_ptr = list_next(itr))) {
+		/* we only want to use bps that are in partitions
+		 */
+		bit_or(bitmap, part_ptr->node_bitmap);
+	}
+	list_iterator_destroy(itr);
+
+	bit_not(bitmap);
+	non_usable_nodes = bitmap2node_name(bitmap);
+	FREE_NULL_BITMAP(bitmap);
+	removable_set_bps(non_usable_nodes);
+
 	node_bitmap = bit_alloc(node_record_count);	
 	ionode_bitmap = bit_alloc(bluegene_numpsets);	
 	for (i=0; i<node_select_ptr->record_count; i++) {
@@ -1027,24 +1145,24 @@ extern int load_state_file(char *dir_name)
 		bg_record->ionodes =
 			xstrdup(bg_info_record->ionodes);
 		bg_record->ionode_bitmap = bit_copy(ionode_bitmap);
+		/* put_block_in_error_state should be
+		   called after the bg_list has been
+		   made.  We can't call it here since
+		   this record isn't the record kept
+		   around in bg_list.
+		*/
 		bg_record->state = bg_info_record->state;
-#ifdef HAVE_BGL
-		bg_record->quarter = bg_info_record->quarter;
-		bg_record->nodecard = bg_info_record->nodecard;
-#endif
-		if(bg_info_record->state == RM_PARTITION_ERROR)
-			bg_record->job_running = BLOCK_ERROR_STATE;
-		else
-			bg_record->job_running = NO_JOB_RUNNING;
+		bg_record->job_running = NO_JOB_RUNNING;
+
 		bg_record->bp_count = bit_size(node_bitmap);
 		bg_record->node_cnt = bg_info_record->node_cnt;
 		if(bluegene_bp_node_cnt > bg_record->node_cnt) {
 			ionodes = bluegene_bp_node_cnt 
 				/ bg_record->node_cnt;
-			bg_record->cpus_per_bp =
-				procs_per_node / ionodes;
+			bg_record->cpu_cnt = procs_per_node / ionodes;
 		} else {
-			bg_record->cpus_per_bp = procs_per_node;
+			bg_record->cpu_cnt = procs_per_node
+				* bg_record->bp_count;
 		}
 #ifdef HAVE_BGL
 		bg_record->node_use = bg_info_record->node_use;
@@ -1079,8 +1197,11 @@ extern int load_state_file(char *dir_name)
 		for(j=0; j<BA_SYSTEM_DIMENSIONS; j++) 
 			geo[j] = bg_record->geo[j];
 				
-		if(bluegene_layout_mode == LAYOUT_OVERLAP) 
+		if(bluegene_layout_mode == LAYOUT_OVERLAP) {
 			reset_ba_system(false);
+			removable_set_bps(non_usable_nodes);
+		}
+
 		results = list_create(NULL);
 		name = set_bg_block(results,
 				    bg_record->start, 
@@ -1124,6 +1245,7 @@ extern int load_state_file(char *dir_name)
 		}
 	}
 
+	xfree(non_usable_nodes);
 	FREE_NULL_BITMAP(ionode_bitmap);
 	FREE_NULL_BITMAP(node_bitmap);
 
@@ -1141,5 +1263,3 @@ unpack_error:
 	free_buf(buffer);
 	return SLURM_FAILURE;
 }
-
-#endif
diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c
index 575c70424..0378a615a 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.c
+++ b/src/plugins/select/bluegene/plugin/bluegene.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  bluegene.c - blue gene node configuration processing module. 
  *
- *  $Id: bluegene.c 16146 2009-01-06 18:20:48Z da $
+ *  $Id: bluegene.c 17202 2009-04-09 16:56:23Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -40,7 +40,7 @@
 #include "defined_block.h"
 #include <stdio.h>
 
-#define MMCS_POLL_TIME 120	/* poll MMCS for down switches and nodes 
+#define MMCS_POLL_TIME 30	/* poll MMCS for down switches and nodes 
 				 * every 120 secs */
 #define BG_POLL_TIME 0	        /* poll bg blocks every 3 secs */
 
@@ -49,8 +49,6 @@
 char* bg_conf = NULL;
 
 /* Global variables */
-my_bluegene_t *bg = NULL;
-
 List bg_list = NULL;			/* total list of bg_record entries */
 List bg_curr_block_list = NULL;  	/* current bg blocks in bluegene.conf*/
 List bg_job_block_list = NULL;  	/* jobs running in these blocks */
@@ -88,6 +86,7 @@ uint16_t bluegene_quarter_ionode_cnt = 0;
 uint16_t bluegene_nodecard_node_cnt = 0;
 uint16_t bluegene_nodecard_ionode_cnt = 0;
 uint16_t bridge_api_verb = 0;
+
 bool agent_fini = false;
 time_t last_bg_update;
 pthread_mutex_t block_state_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -97,6 +96,8 @@ int blocks_are_created = 0;
 int num_unused_cpus = 0;
 
 pthread_mutex_t freed_cnt_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t freed_cond = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t destroy_cond = PTHREAD_COND_INITIALIZER;
 List bg_free_block_list = NULL;  	/* blocks to be deleted */
 List bg_destroy_block_list = NULL;       /* blocks to be destroyed */
 int free_cnt = 0;
@@ -121,26 +122,6 @@ static void _destroy_bitmap(void *object);
 /* Initialize all plugin variables */
 extern int init_bg(void)
 {
-#ifdef HAVE_BG_FILES
-	int rc;
-	rm_size3D_t bp_size;
-	
-	info("Attempting to contact MMCS");
-	if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
-		fatal("init_bg: rm_get_BG(): %s", bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-	
-	if ((rc = bridge_get_data(bg, RM_Msize, &bp_size)) != STATUS_OK) {
-		fatal("init_bg: rm_get_data(): %s", bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-	verbose("BlueGene configured with %d x %d x %d base blocks",
-		bp_size.X, bp_size.Y, bp_size.Z);
-	DIM_SIZE[X]=bp_size.X;
-	DIM_SIZE[Y]=bp_size.Y;
-	DIM_SIZE[Z]=bp_size.Z;
-#endif
 	ba_init(NULL);
 
 	info("BlueGene plugin loaded successfully");
@@ -151,10 +132,18 @@ extern int init_bg(void)
 /* Purge all plugin variables */
 extern void fini_bg(void)
 {
-#ifdef HAVE_BG_FILES
-	int rc;
-#endif
-	_set_bg_lists();
+	if(!agent_fini) {
+		error("The agent hasn't been finied yet!");
+		agent_fini = true;
+	}
+	/* wait for the agent threads to finish up */
+	waitfor_block_agents();
+
+	/* wait for the destroy/free threads to finish up */
+	if(free_cnt)
+		pthread_cond_wait(&freed_cond, &freed_cnt_mutex);
+	if(destroy_cnt)
+		pthread_cond_wait(&destroy_cond, &freed_cnt_mutex);
 	
 	if (bg_list) {
 		list_destroy(bg_list);
@@ -173,16 +162,7 @@ extern void fini_bg(void)
 		list_destroy(bg_booted_block_list);
 		bg_booted_block_list = NULL;
 	}
-
-	/* wait for the free threads to finish up don't destroy the
-	 * bg_free_block_list here */
-	while(free_cnt > 0)
-		usleep(1000);
-	/* wait for the destroy threads to finish up don't destroy the
-	 * bg_destroy_block_list here */
-	while(destroy_cnt > 0)
-		usleep(1000);
-	
+		
 #ifdef HAVE_BGL
 	if(bg_blrtsimage_list) {
 		list_destroy(bg_blrtsimage_list);
@@ -232,11 +212,6 @@ extern void fini_bg(void)
 	xfree(bg_slurm_user_name);
 	xfree(bg_slurm_node_prefix);
 	
-#ifdef HAVE_BG_FILES
-	if(bg)
-		if ((rc = bridge_free_bg(bg)) != STATUS_OK)
-			error("bridge_free_BG(): %s", bg_err_str(rc));
-#endif	
 	ba_fini();
 }
 
@@ -258,29 +233,13 @@ extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b)
 	if (!bit_overlap(rec_a->bitmap, rec_b->bitmap)) 
 		return false;
 
-#ifdef HAVE_BGL
-
-	if(rec_a->quarter != (uint16_t) NO_VAL) {
-		if(rec_b->quarter == (uint16_t) NO_VAL)
-			return true;
-		else if(rec_a->quarter != rec_b->quarter)
-			return false;
-		if(rec_a->nodecard != (uint16_t) NO_VAL) {
-			if(rec_b->nodecard == (uint16_t) NO_VAL)
-				return true;
-			else if(rec_a->nodecard
-				!= rec_b->nodecard)
-				return false;
-		}
-	}
-#else
 	if((rec_a->node_cnt >= bluegene_bp_node_cnt)
 	   || (rec_b->node_cnt >= bluegene_bp_node_cnt))
 		return true;
 	
 	if (!bit_overlap(rec_a->ionode_bitmap, rec_b->ionode_bitmap)) 
 		return false;
-#endif	
+
 	return true;
 }
 
@@ -408,6 +367,20 @@ extern char* convert_conn_type(rm_connection_type_t conn_type)
 		return "SMALL"; 
 	case (SELECT_NAV):
 		return "NAV";
+#ifndef HAVE_BGL
+	case SELECT_HTC_S:
+		return "HTC_S";
+		break;
+	case SELECT_HTC_D:
+		return "HTC_D";
+		break;
+	case SELECT_HTC_V:
+		return "HTC_V";
+		break;
+	case SELECT_HTC_L:
+		return "HTC_L";
+		break;
+#endif
 	default:
 		break;
 	}
@@ -440,51 +413,69 @@ extern void sort_bg_record_inc_size(List records){
 }
 
 /*
- * bluegene_agent - detached thread periodically updates status of
- * bluegene nodes. 
+ * block_agent - thread periodically updates status of
+ * bluegene blocks. 
  * 
- * NOTE: I don't grab any locks here because slurm_drain_nodes grabs
- * the necessary locks.
  */
-extern void *bluegene_agent(void *args)
+extern void *block_agent(void *args)
 {
-	static time_t last_mmcs_test;
 	static time_t last_bg_test;
 	int rc;
+	time_t now = time(NULL);
 
-	last_mmcs_test = time(NULL) + MMCS_POLL_TIME;
-	last_bg_test = time(NULL) + BG_POLL_TIME;
+	last_bg_test = now - BG_POLL_TIME;
 	while (!agent_fini) {
-		time_t now = time(NULL);
 
 		if (difftime(now, last_bg_test) >= BG_POLL_TIME) {
 			if (agent_fini)		/* don't bother */
-				return NULL;	/* quit now */
+				break;	/* quit now */
 			if(blocks_are_created) {
 				last_bg_test = now;
 				if((rc = update_block_list()) == 1) {
-					slurm_mutex_lock(&block_state_mutex);
 					last_bg_update = now;
-					slurm_mutex_unlock(&block_state_mutex);
 				} else if(rc == -1)
 					error("Error with update_block_list");
 				if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
 					if((rc = update_freeing_block_list())
-					   == -1)
+					   == 1) {
+						last_bg_update = now;
+					} else if(rc == -1)
 						error("Error with "
 						      "update_block_list 2");
 				}
 			}
+			now = time(NULL);
 		}
+		
+		sleep(1);
+	}
+	return NULL;
+}
 
+/*
+ * state_agent - thread periodically updates status of
+ * bluegene nodes. 
+ * 
+ */
+extern void *state_agent(void *args)
+{
+	static time_t last_mmcs_test;
+	time_t now = time(NULL);
+
+	last_mmcs_test = now - MMCS_POLL_TIME;
+	while (!agent_fini) {
 		if (difftime(now, last_mmcs_test) >= MMCS_POLL_TIME) {
 			if (agent_fini)		/* don't bother */
-				return NULL;	/* quit now */
-			last_mmcs_test = now;
-			test_mmcs_failures();	/* can run for a while */
-		}	
+				break; 	/* quit now */
+			if(blocks_are_created) {
+				last_mmcs_test = now;
+				/* can run for a while */
+				test_mmcs_failures();
+			}
+		} 	
 				
 		sleep(1);
+		now = time(NULL);
 	}
 	return NULL;
 }
@@ -502,7 +493,7 @@ extern int remove_from_bg_list(List my_bg_list, bg_record_t *bg_record)
 
 	//slurm_mutex_lock(&block_state_mutex);	
 	itr = list_iterator_create(my_bg_list);
-	while ((found_record = (bg_record_t *) list_next(itr)) != NULL) {
+	while ((found_record = list_next(itr))) {
 		if(found_record)
 			if(bg_record == found_record) {
 				list_remove(itr);
@@ -615,97 +606,26 @@ extern int bg_free_block(bg_record_t *bg_record)
 		}
 		
 		if ((bg_record->state == RM_PARTITION_FREE)
-		    ||  (bg_record->state == RM_PARTITION_ERROR)) {
-			break;
-		}
-		slurm_mutex_unlock(&block_state_mutex);			
-		sleep(3);
-	}
-	remove_from_bg_list(bg_booted_block_list, bg_record);
-	slurm_mutex_unlock(&block_state_mutex);			
-		
-	return SLURM_SUCCESS;
-}
-
-#ifndef HAVE_BGL
-/* This function not available in bgl land */
-extern int bg_reboot_block(bg_record_t *bg_record)
-{
-#ifdef HAVE_BG_FILES
-	int rc;
-#endif
-	if(!bg_record) {
-		error("bg_reboot_block: there was no bg_record");
-		return SLURM_ERROR;
-	}
-	
-	while (1) {
-		if(!bg_record) {
-			error("bg_reboot_block: there was no bg_record");
-			return SLURM_ERROR;
-		}
-		
-		slurm_mutex_lock(&block_state_mutex);			
-		if (bg_record->state != NO_VAL
-		    && bg_record->state != RM_PARTITION_REBOOTING) {
-#ifdef HAVE_BG_FILES
-			debug2("bridge_reboot %s", bg_record->bg_block_id);
-			
-			rc = bridge_reboot_block(bg_record->bg_block_id);
-			if (rc != STATUS_OK) {
-				if(rc == PARTITION_NOT_FOUND) {
-					debug("block %s is not found",
-					      bg_record->bg_block_id);
-					break;
-				} else if(rc == INCOMPATIBLE_STATE) {
-					debug2("bridge_reboot_partition"
-					       "(%s): %s State = %d",
-					       bg_record->bg_block_id, 
-					       bg_err_str(rc), 
-					       bg_record->state);
-				} else {
-					error("bridge_reboot_partition"
-					      "(%s): %s State = %d",
-					      bg_record->bg_block_id, 
-					      bg_err_str(rc), 
-					      bg_record->state);
-				}
-			}
-#else
-			bg_record->state = RM_PARTITION_READY;	
-			break;
+#ifdef HAVE_BGL
+		    ||  (bg_record->state == RM_PARTITION_ERROR)
 #endif
-		}
-		
-		if (bg_record->state == RM_PARTITION_CONFIGURING) {
-			if(!block_exist_in_list(bg_booted_block_list,
-						bg_record))
-				list_push(bg_booted_block_list, bg_record);
-			break;
-		} else if (bg_record->state == RM_PARTITION_ERROR) {
-			remove_from_bg_list(bg_booted_block_list, bg_record);
+			) {
 			break;
 		}
 		slurm_mutex_unlock(&block_state_mutex);			
 		sleep(3);
 	}
+	remove_from_bg_list(bg_booted_block_list, bg_record);
 	slurm_mutex_unlock(&block_state_mutex);			
 		
 	return SLURM_SUCCESS;
 }
-#endif
 
 /* Free multiple blocks in parallel */
 extern void *mult_free_block(void *args)
 {
 	bg_record_t *bg_record = NULL;
-	
-	slurm_mutex_lock(&freed_cnt_mutex);
-	if ((bg_freeing_list == NULL) 
-	    && ((bg_freeing_list = list_create(destroy_bg_record)) == NULL))
-		fatal("malloc failure in bg_freeing_list");
-	slurm_mutex_unlock(&freed_cnt_mutex);
-	
+		
 	/*
 	 * Don't just exit when there is no work left. Creating 
 	 * pthreads from within a dynamically linked object (plugin)
@@ -737,13 +657,10 @@ extern void *mult_free_block(void *args)
 	}
 	slurm_mutex_lock(&freed_cnt_mutex);
 	free_cnt--;
-	if(bg_freeing_list) {
-		list_destroy(bg_freeing_list);
-		bg_freeing_list = NULL;
-	}
 	if(free_cnt == 0) {
 		list_destroy(bg_free_block_list);
 		bg_free_block_list = NULL;
+		pthread_cond_signal(&freed_cond);
 	}
 	slurm_mutex_unlock(&freed_cnt_mutex);
 	return NULL;
@@ -788,8 +705,7 @@ extern void *mult_destroy_block(void *args)
 		sort_bg_record_inc_size(bg_freeing_list);
 		if(remove_from_bg_list(bg_job_block_list, bg_record) 
 		   == SLURM_SUCCESS) {
-			num_unused_cpus += 
-				bg_record->bp_count*bg_record->cpus_per_bp;
+			num_unused_cpus += bg_record->cpu_cnt;
 		}
 		slurm_mutex_unlock(&block_state_mutex);
 		debug3("removing the jobs on block %s\n",
@@ -824,7 +740,10 @@ extern void *mult_destroy_block(void *args)
 			debug2("done %s", 
 			       (char *)bg_record->bg_block_id);
 #endif
+		slurm_mutex_lock(&block_state_mutex);
 		destroy_bg_record(bg_record);
+		slurm_mutex_unlock(&block_state_mutex);
+		last_bg_update = time(NULL);
 		debug2("destroyed");
 		
 	already_here:
@@ -835,13 +754,14 @@ extern void *mult_destroy_block(void *args)
 	}
 	slurm_mutex_lock(&freed_cnt_mutex);
 	destroy_cnt--;
-	if(bg_freeing_list) {
-		list_destroy(bg_freeing_list);
-		bg_freeing_list = NULL;
-	}
 	if(destroy_cnt == 0) {
+		if(bg_freeing_list) {
+			list_destroy(bg_freeing_list);
+			bg_freeing_list = NULL;
+		}
 		list_destroy(bg_destroy_block_list);
 		bg_destroy_block_list = NULL;
+		pthread_cond_signal(&destroy_cond);
 	}
 	slurm_mutex_unlock(&freed_cnt_mutex);
 
@@ -878,9 +798,15 @@ extern int free_block_list(List delete_list)
 	while ((found_record = (bg_record_t*)list_pop(delete_list)) != NULL) {
 		/* push job onto queue in a FIFO */
 		debug3("adding %s to be freed", found_record->bg_block_id);
-		if (list_push(*block_list, found_record) == NULL)
-			fatal("malloc failure in _block_op/list_push");
-		
+		if(!block_ptr_exist_in_list(*block_list, found_record)) {
+			if (list_push(*block_list, found_record) == NULL)
+				fatal("malloc failure in _block_op/list_push");
+		} else {
+			error("we had block %s already on the freeing list",
+			      found_record->bg_block_id);
+			num_block_to_free--;
+			continue;
+		}
 		/* already running MAX_AGENTS we don't really need more 
 		   since they don't end until we shut down the controller */
 		if (*count > MAX_AGENT_COUNT) 
@@ -1198,9 +1124,16 @@ extern int read_bg_conf(void)
 		bitstr_t *tmp_bitmap = NULL;
 		int small_size = 1;
 
-		bluegene_quarter_ionode_cnt = bluegene_numpsets/4;
-		bluegene_nodecard_ionode_cnt = bluegene_quarter_ionode_cnt/4;
-
+		/* THIS IS A HACK TO MAKE A 1 NODECARD SYSTEM WORK */
+		if(bluegene_bp_node_cnt == bluegene_nodecard_node_cnt) {
+			bluegene_quarter_ionode_cnt = 2;
+			bluegene_nodecard_ionode_cnt = 2;
+		} else {
+			bluegene_quarter_ionode_cnt = bluegene_numpsets/4;
+			bluegene_nodecard_ionode_cnt =
+				bluegene_quarter_ionode_cnt/4;
+		}
+			
 		/* How many nodecards per ionode */
 		bluegene_nc_ratio = 
 			((double)bluegene_bp_node_cnt 
@@ -1255,6 +1188,11 @@ extern int read_bg_conf(void)
 				list_append(bg_valid_small32, tmp_bitmap);
 			}
 		}
+		/* If we only have 1 nodecard just jump to the end
+		   since this will never need to happen below.
+		   Pretty much a hack to avoid seg fault;). */
+		if(bluegene_bp_node_cnt == bluegene_nodecard_node_cnt) 
+			goto no_calc;
 
 		bg_valid_small128 = list_create(_destroy_bitmap);
 		if((small_size = bluegene_quarter_ionode_cnt))
@@ -1294,6 +1232,8 @@ extern int read_bg_conf(void)
 		fatal("your numpsets is 0");
 	}
 
+no_calc:
+
 	if (!s_p_get_uint16(&bridge_api_verb, "BridgeAPIVerbose", tbl))
 		info("Warning: BridgeAPIVerbose not configured "
 		     "in bluegene.conf");
@@ -1301,6 +1241,20 @@ extern int read_bg_conf(void)
 		info("BridgeAPILogFile not configured in bluegene.conf");
 	else
 		_reopen_bridge_log();
+
+	if (s_p_get_string(&layout, "DenyPassthrough", tbl)) {
+		if(strstr(layout, "X")) 
+			ba_deny_pass |= PASS_DENY_X;
+		if(strstr(layout, "Y")) 
+			ba_deny_pass |= PASS_DENY_Y;
+		if(strstr(layout, "Z")) 
+			ba_deny_pass |= PASS_DENY_Z;
+		if(!strcasecmp(layout, "ALL")) 
+			ba_deny_pass |= PASS_DENY_ALL;
+		
+		xfree(layout);
+	}
+
 	if (!s_p_get_string(&layout, "LayoutMode", tbl)) {
 		info("Warning: LayoutMode was not specified in bluegene.conf "
 		     "defaulting to STATIC partitioning");
@@ -1329,7 +1283,7 @@ extern int read_bg_conf(void)
 		}
 		
 		for (i = 0; i < count; i++) {
-			add_bg_record(bg_list, NULL, blockreq_array[i]);
+			add_bg_record(bg_list, NULL, blockreq_array[i], 0, 0);
 		}
 	}
 	s_p_hashtbl_destroy(tbl);
@@ -1340,14 +1294,17 @@ extern int read_bg_conf(void)
 extern int validate_current_blocks(char *dir)
 {
 	/* found bg blocks already on system */
-	List bg_found_block_list = list_create(NULL);
+	List bg_found_block_list = NULL;
 	static time_t last_config_update = (time_t) 0;
+	ListIterator itr = NULL;
+	bg_record_t *bg_record = NULL;
 
 	/* only run on startup */
 	if(last_config_update)
 		return SLURM_SUCCESS;
 
 	last_config_update = time(NULL);
+	bg_found_block_list = list_create(NULL);
 //#if 0	
 	/* Check to see if the configs we have are correct */
 	if (_validate_config_nodes(&bg_found_block_list, dir) == SLURM_ERROR) { 
@@ -1371,6 +1328,17 @@ extern int validate_current_blocks(char *dir)
 		}
 	} 
 	
+	/* ok now since bg_list has been made we now can put blocks in
+	   an error state this needs to be done outside of a lock
+	   it doesn't matter much in the first place though since
+	   no threads are started before this function. */
+	itr = list_iterator_create(bg_list);
+	while((bg_record = list_next(itr))) {
+		if(bg_record->state == RM_PARTITION_ERROR) 
+			put_block_in_error_state(bg_record, BLOCK_ERROR_STATE);
+	}
+	list_iterator_destroy(itr);
+
 	slurm_mutex_lock(&block_state_mutex);
 	list_destroy(bg_curr_block_list);
 	bg_curr_block_list = NULL;
@@ -1378,6 +1346,7 @@ extern int validate_current_blocks(char *dir)
 		list_destroy(bg_found_block_list);
 		bg_found_block_list = NULL;
 	}
+
 	last_bg_update = time(NULL);
 	blocks_are_created = 1;
 	sort_bg_record_inc_size(bg_list);
@@ -1421,8 +1390,7 @@ static void _set_bg_lists()
 	bg_mloaderimage_list = list_create(destroy_image);
 	if(bg_ramdiskimage_list)
 		list_destroy(bg_ramdiskimage_list);
-	bg_ramdiskimage_list = list_create(destroy_image);
-	
+	bg_ramdiskimage_list = list_create(destroy_image);	
 }
 
 /*
@@ -1439,7 +1407,6 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 	int rc = SLURM_ERROR;
 	bg_record_t* bg_record = NULL;	
 	bg_record_t* init_bg_record = NULL;
-	bg_record_t* full_system_bg_record = NULL;	
 	int full_created = 0;
 	ListIterator itr_conf;
 	ListIterator itr_curr;
@@ -1450,8 +1417,12 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 	 * happens in the state load before this in emulation mode */
 	if (read_bg_blocks() == SLURM_ERROR)
 		return SLURM_ERROR;
+	/* since we only care about error states here we don't care
+	   about the return code this must be done after the bg_list
+	   is created */
+	load_state_file(dir);
 #else
-	/* read in state from last run.  Only for emulation mode */
+	/* read in state from last run. */
 	if ((rc = load_state_file(dir)) != SLURM_SUCCESS)
 		return rc;
 	/* This needs to be reset to SLURM_ERROR or it will never we
@@ -1463,44 +1434,32 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 
 	if(!bg_curr_block_list)
 		return SLURM_ERROR;
-
-	itr_curr = list_iterator_create(bg_curr_block_list);
-	while ((init_bg_record = list_next(itr_curr))) 
-		if(init_bg_record->full_block) 
-			full_system_bg_record = init_bg_record;	
 	
 	if(!*bg_found_block_list)
 		(*bg_found_block_list) = list_create(NULL);
-	
+
+	itr_curr = list_iterator_create(bg_curr_block_list);
 	itr_conf = list_iterator_create(bg_list);
 	while ((bg_record = (bg_record_t*) list_next(itr_conf))) {
-		/* translate hostlist to ranged 
-		   string for consistent format
-		   search here 
-		*/
 		list_iterator_reset(itr_curr);
-		while ((init_bg_record = list_next(itr_curr))) {		
-			if (strcasecmp(bg_record->nodes, 
-				       init_bg_record->nodes))
+		while ((init_bg_record = list_next(itr_curr))) {
+			if (strcasecmp(bg_record->nodes, init_bg_record->nodes))
 				continue; /* wrong nodes */
-			if (bg_record->conn_type 
-			    != init_bg_record->conn_type)
-				continue; /* wrong conn_type */
+			if(!bit_equal(bg_record->ionode_bitmap,
+				      init_bg_record->ionode_bitmap))
+				continue;
 #ifdef HAVE_BGL
-			if(bg_record->quarter !=
-			   init_bg_record->quarter)
-				continue; /* wrong quart */
-			if(bg_record->nodecard !=
-			   init_bg_record->nodecard)
-				continue; /* wrong nodecard */
+			if (bg_record->conn_type != init_bg_record->conn_type)
+				continue; /* wrong conn_type */
 			if(bg_record->blrtsimage &&
 			   strcasecmp(bg_record->blrtsimage,
 				      init_bg_record->blrtsimage)) 
 				continue;
 #else
-			if(!bit_equal(bg_record->ionode_bitmap,
-				     init_bg_record->ionode_bitmap))
-				continue;
+			if ((bg_record->conn_type != init_bg_record->conn_type)
+			    && ((bg_record->conn_type < SELECT_SMALL)
+				&& (init_bg_record->conn_type < SELECT_SMALL)))
+				continue; /* wrong conn_type */
 #endif
 			if(bg_record->linuximage &&
 			   strcasecmp(bg_record->linuximage,
@@ -1516,6 +1475,10 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 				continue;
 		       			
 			copy_bg_record(init_bg_record, bg_record);
+			/* remove from the curr list since we just
+			   matched it no reason to keep it around
+			   anymore */
+			list_delete_item(itr_curr);
 			break;
 		}
 			
@@ -1539,37 +1502,45 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 			     convert_conn_type(bg_record->conn_type));
 			if(((bg_record->state == RM_PARTITION_READY)
 			    || (bg_record->state == RM_PARTITION_CONFIGURING))
-			   && !block_exist_in_list(bg_booted_block_list, 
-						   bg_record))
+			   && !block_ptr_exist_in_list(bg_booted_block_list, 
+						       bg_record))
 				list_push(bg_booted_block_list, bg_record);
 		}
 	}		
-	list_iterator_destroy(itr_conf);
-	list_iterator_destroy(itr_curr);
 	if(bluegene_layout_mode == LAYOUT_DYNAMIC)
 		goto finished;
 
-	if(!full_created && full_system_bg_record) {
-		bg_record = xmalloc(sizeof(bg_record_t));
-		copy_bg_record(full_system_bg_record, bg_record);
-		list_append(bg_list, bg_record);
-		list_push(*bg_found_block_list, bg_record);
-		format_node_name(bg_record, tmp_char, sizeof(tmp_char));
-		info("Existing: BlockID:%s Nodes:%s Conn:%s",
-		     bg_record->bg_block_id, 
-		     tmp_char,
-		     convert_conn_type(bg_record->conn_type));
-		if(((bg_record->state == RM_PARTITION_READY)
-		    || (bg_record->state == RM_PARTITION_CONFIGURING))
-		   && !block_exist_in_list(bg_booted_block_list, 
-					   bg_record))
-			list_push(bg_booted_block_list, bg_record);
+	if(!full_created) {
+		list_iterator_reset(itr_curr);
+		while ((init_bg_record = list_next(itr_curr))) {
+			if(init_bg_record->full_block) {
+				list_remove(itr_curr);
+				bg_record = init_bg_record;
+				list_append(bg_list, bg_record);
+				list_push(*bg_found_block_list, bg_record);
+				format_node_name(bg_record, tmp_char,
+						 sizeof(tmp_char));
+				info("Existing: BlockID:%s Nodes:%s Conn:%s",
+				     bg_record->bg_block_id, 
+				     tmp_char,
+				     convert_conn_type(bg_record->conn_type));
+				if(((bg_record->state == RM_PARTITION_READY)
+				    || (bg_record->state 
+					== RM_PARTITION_CONFIGURING))
+				   && !block_ptr_exist_in_list(
+					   bg_booted_block_list, bg_record))
+					list_push(bg_booted_block_list,
+						  bg_record);
+				break;
+			}
+		}
 	}
 		
 finished:
-	if(list_count(bg_list) == list_count(bg_curr_block_list))
+	list_iterator_destroy(itr_conf);
+	list_iterator_destroy(itr_curr);
+	if(!list_count(bg_curr_block_list))
 		rc = SLURM_SUCCESS;
-	
 	return rc;
 }
 
@@ -1681,6 +1652,8 @@ static int _delete_old_blocks(List bg_found_block_list)
 		
 	retries=30;
 	while(num_block_to_free > num_block_freed) {
+		/* no need to check for return code here, things
+		   haven't started up yet. */
 		update_freeing_block_list();
 		if(retries==30) {
 			info("Waiting for old blocks to be "
diff --git a/src/plugins/select/bluegene/plugin/bluegene.h b/src/plugins/select/bluegene/plugin/bluegene.h
index 9730f46aa..3f6201036 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.h
+++ b/src/plugins/select/bluegene/plugin/bluegene.h
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  bluegene.h - header for blue gene configuration processing module. 
  *
- *  $Id: bluegene.h 16146 2009-01-06 18:20:48Z da $
+ *  $Id: bluegene.h 17102 2009-03-31 23:23:01Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -49,10 +49,7 @@ typedef enum bg_layout_type {
 	LAYOUT_DYNAMIC	/* slurm will make all blocks */
 } bg_layout_t;
 
-
 /* Global variables */
-
-extern my_bluegene_t *bg;
 #ifdef HAVE_BGL
 extern char *default_blrtsimage;
 #endif
@@ -74,6 +71,7 @@ extern uint16_t bluegene_nodecard_node_cnt;
 extern uint16_t bluegene_nodecard_ionode_cnt;
 extern uint16_t bluegene_quarter_node_cnt;
 extern uint16_t bluegene_quarter_ionode_cnt;
+
 extern ba_system_t *ba_system_ptr;
 extern time_t last_bg_update;
 
@@ -104,6 +102,7 @@ extern int num_unused_cpus;
 
 #define MAX_PTHREAD_RETRIES  1
 #define BLOCK_ERROR_STATE    -3
+#define ADMIN_ERROR_STATE    -4
 #define NO_JOB_RUNNING       -1
 #define MAX_AGENT_COUNT      30
 #define BUFSIZE 4096
@@ -145,15 +144,15 @@ extern char *convert_node_use(rm_partition_mode_t pt);
 /* sort a list of bg_records by size (node count) */
 extern void sort_bg_record_inc_size(List records);
 
-/* bluegene_agent - detached thread periodically tests status of bluegene 
- * nodes and switches */
-extern void *bluegene_agent(void *args);
+/* block_agent - detached thread periodically tests status of bluegene 
+ * blocks */
+extern void *block_agent(void *args);
 
-extern int bg_free_block(bg_record_t *bg_record);
+/* state_agent - thread periodically tests status of bluegene 
+ * nodes, nodecards, and switches */
+extern void *state_agent(void *args);
 
-#ifndef HAVE_BGL
-extern int bg_reboot_block(bg_record_t *bg_record);
-#endif
+extern int bg_free_block(bg_record_t *bg_record);
 
 extern int remove_from_bg_list(List my_bg_list, bg_record_t *bg_record);
 extern bg_record_t *find_and_remove_org_from_bg_list(List my_list, 
diff --git a/src/plugins/select/bluegene/plugin/defined_block.c b/src/plugins/select/bluegene/plugin/defined_block.c
index 084344a2c..c2ac0a62b 100644
--- a/src/plugins/select/bluegene/plugin/defined_block.c
+++ b/src/plugins/select/bluegene/plugin/defined_block.c
@@ -59,10 +59,29 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 	int geo[BA_SYSTEM_DIMENSIONS];
 	char temp[256];
 	List results = NULL;
-	
+	struct part_record *part_ptr = NULL;
+	char *non_usable_nodes = NULL;
+	bitstr_t *bitmap = bit_alloc(node_record_count);
+
 #ifdef HAVE_BG_FILES
 	init_wires();
 #endif
+ 		
+	/* Locks are already in place to protect part_list here */
+	itr = list_iterator_create(part_list);
+	while ((part_ptr = list_next(itr))) {
+		/* we only want to use bps that are in
+		 * partitions
+		 */
+		bit_or(bitmap, part_ptr->node_bitmap);
+	}
+	list_iterator_destroy(itr);
+
+	bit_not(bitmap);
+	non_usable_nodes = bitmap2node_name(bitmap);
+	removable_set_bps(non_usable_nodes);
+	FREE_NULL_BITMAP(bitmap);
+	
 	slurm_mutex_lock(&block_state_mutex);
 	reset_ba_system(false);
 	if(bg_list) {
@@ -73,27 +92,18 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 					bg_found_block_list);
 				while ((found_record = (bg_record_t*) 
 					list_next(itr_found)) != NULL) {
-/* 					info("%s.%d.%d ?= %s.%d.%d\n", */
+/* 					info("%s[%s[ ?= %s[%s]\n", */
 /* 					     bg_record->nodes, */
-/* 					     bg_record->quarter, */
-/* 					     bg_record->nodecard, */
+/* 					     bg_record->ionodes, */
 /* 					     found_record->nodes, */
-/* 					     found_record->quarter, */
-/* 					     found_record->nodecard); */
+/* 					     found_record->ionodes); */
 					
 					if ((bit_equal(bg_record->bitmap, 
 						       found_record->bitmap))
-#ifdef HAVE_BGL
-					    && (bg_record->quarter ==
-						found_record->quarter)
-					    && (bg_record->nodecard ==
-						found_record->nodecard)
-#else
 					    && (bit_equal(bg_record->
 							  ionode_bitmap, 
 							  found_record->
 							  ionode_bitmap))
-#endif
 						) {
 						/* don't reboot this one */
 						break;	
@@ -104,14 +114,16 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 				error("create_defined_blocks: "
 				      "no bg_found_block_list 1");
 			}
-			if(bg_record->bp_count>0 
+			if(bg_record->bp_count > 0 
 			   && !bg_record->full_block
-			   && bg_record->cpus_per_bp == procs_per_node) {
+			   && bg_record->cpu_cnt >= procs_per_node) {
 				char *name = NULL;
 
-				if(overlapped == LAYOUT_OVERLAP) 
+				if(overlapped == LAYOUT_OVERLAP) {
 					reset_ba_system(false);
-									
+					removable_set_bps(non_usable_nodes);
+				}
+
 				/* we want the bps that aren't
 				 * in this record to mark them as used
 				 */
@@ -149,6 +161,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						reset_all_removed_bps();
 						slurm_mutex_unlock(
 							&block_state_mutex);
+						xfree(non_usable_nodes);
 						return SLURM_ERROR;
 					}
 				} else {
@@ -167,6 +180,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						list_iterator_destroy(itr);
 						slurm_mutex_unlock(
 							&block_state_mutex);
+						xfree(non_usable_nodes);
 						return SLURM_ERROR;
 					}
 					
@@ -215,6 +229,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 				   == SLURM_ERROR) {
 					list_iterator_destroy(itr);
 					slurm_mutex_unlock(&block_state_mutex);
+					xfree(non_usable_nodes);
 					return rc;
 				}
 				print_bg_record(bg_record);
@@ -224,8 +239,11 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 	} else {
 		error("create_defined_blocks: no bg_list 2");
 		slurm_mutex_unlock(&block_state_mutex);
+		xfree(non_usable_nodes);
 		return SLURM_ERROR;
 	}
+	xfree(non_usable_nodes);
+
 	slurm_mutex_unlock(&block_state_mutex);
 	create_full_system_block(bg_found_block_list);
 
@@ -263,7 +281,28 @@ extern int create_full_system_block(List bg_found_block_list)
 	int i;
 	blockreq_t blockreq;
 	List results = NULL;
+	struct part_record *part_ptr = NULL;
+	bitstr_t *bitmap = bit_alloc(node_record_count);
 	
+	/* Locks are already in place to protect part_list here */
+	itr = list_iterator_create(part_list);
+	while ((part_ptr = list_next(itr))) {
+		/* we only want to use bps that are in
+		 * partitions
+		 */
+		bit_or(bitmap, part_ptr->node_bitmap);
+	}
+	list_iterator_destroy(itr);
+
+	bit_not(bitmap);
+	if(bit_ffs(bitmap) != -1) {
+		error("We don't have the entire system covered by partitions, "
+		      "can't create full system block");
+		FREE_NULL_BITMAP(bitmap);
+		return SLURM_ERROR;
+	}
+	FREE_NULL_BITMAP(bitmap);
+
 	/* Here we are adding a block that in for the entire machine 
 	   just in case it isn't in the bluegene.conf file.
 	*/
@@ -331,7 +370,7 @@ extern int create_full_system_block(List bg_found_block_list)
 	blockreq.block = name;
 	blockreq.conn_type = SELECT_TORUS;
 
-	add_bg_record(records, NULL, &blockreq);
+	add_bg_record(records, NULL, &blockreq, 0 , 0);
 	xfree(name);
 	
 	bg_record = (bg_record_t *) list_pop(records);
@@ -357,8 +396,7 @@ extern int create_full_system_block(List bg_found_block_list)
 			    geo, 
 			    bg_record->conn_type);
 	if(!name) {
-		error("I was unable to make the "
-		      "requested block.");
+		error("I was unable to make the full system block.");
 		list_destroy(results);
 		list_iterator_destroy(itr);
 		slurm_mutex_unlock(&block_state_mutex);
diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.c b/src/plugins/select/bluegene/plugin/dynamic_block.c
index 508b55dc7..79d265616 100644
--- a/src/plugins/select/bluegene/plugin/dynamic_block.c
+++ b/src/plugins/select/bluegene/plugin/dynamic_block.c
@@ -38,13 +38,9 @@
 
 #include "dynamic_block.h"
 
-#ifdef HAVE_BGL
-static int _split_block(List block_list, List new_blocks,
-			bg_record_t *bg_record, int procs);
-#else
 static int _split_block(List block_list, List new_blocks,
 			bg_record_t *bg_record, int cnodes);
-#endif
+
 static int _breakup_blocks(List block_list, List new_blocks,
 			   ba_request_t *request, List my_block_list);
 
@@ -54,7 +50,8 @@ static int _breakup_blocks(List block_list, List new_blocks,
  * RET - a list of created block(s) or NULL on failure errno is set.
  */
 extern List create_dynamic_block(List block_list, 
-				 ba_request_t *request, List my_block_list)
+				 ba_request_t *request, List my_block_list,
+				 bool track_down_nodes)
 {
 	int rc = SLURM_SUCCESS;
 	
@@ -79,7 +76,7 @@ extern List create_dynamic_block(List block_list,
 
 	slurm_mutex_lock(&block_state_mutex);
 	if(my_block_list) {
-		reset_ba_system(true);
+		reset_ba_system(track_down_nodes);
 		itr = list_iterator_create(my_block_list);
 		while ((bg_record = list_next(itr))) {
 			if(!my_bitmap) {
@@ -137,44 +134,18 @@ extern List create_dynamic_block(List block_list,
 		xfree(nodes);
 		FREE_NULL_BITMAP(bitmap);
 	}
-#ifdef HAVE_BGL
-	if(request->size==1 && cnodes < bluegene_bp_node_cnt) {
-		request->conn_type = SELECT_SMALL;
-		if(request->procs == (procs_per_node/16)) {
-			if(!bluegene_nodecard_ionode_cnt) {
-				error("can't create this size %d "
-				      "on this system numpsets is %d",
-				      request->procs,
-				      bluegene_numpsets);
-				goto finished;
-			}
 
-			blockreq.small32=4;
-			blockreq.small128=3;
-		} else {
-			if(!bluegene_quarter_ionode_cnt) {
-				error("can't create this size %d "
-				      "on this system numpsets is %d",
-				      request->procs,
-				      bluegene_numpsets);
-				goto finished;
-			}
-			blockreq.small128=4;
-		}
-		new_blocks = list_create(destroy_bg_record);
-		if(_breakup_blocks(block_list, new_blocks, 
-				   request, my_block_list)
-		   != SLURM_SUCCESS) {
-			list_destroy(new_blocks);
-			new_blocks = NULL;
-			debug2("small block not able to be placed");
-			//rc = SLURM_ERROR;
-		} else 
-			goto finished;
-	}
-#else
 	if(request->size==1 && cnodes < bluegene_bp_node_cnt) {
 		switch(cnodes) {
+#ifdef HAVE_BGL
+		case 32:
+			blockreq.small32 = 4;
+			blockreq.small128 = 3;
+			break;
+		case 128:
+			blockreq.small128 = 4;
+			break;
+#else
 		case 16:
 			blockreq.small16 = 2;
 			blockreq.small32 = 1;
@@ -200,6 +171,7 @@ extern List create_dynamic_block(List block_list,
 		case 256:
 			blockreq.small256 = 2;
 			break;
+#endif
 		default:
 			error("This size %d is unknown on this system", cnodes);
 			goto finished;
@@ -218,16 +190,23 @@ extern List create_dynamic_block(List block_list,
 		} else 
 			goto finished;
 	}
-#endif	
+
 	if(request->conn_type == SELECT_NAV)
 		request->conn_type = SELECT_TORUS;
 	
+	//debug("going to create %d", request->size);
 	if(!new_ba_request(request)) {
-		error("Problems with request for size %d geo %dx%dx%d", 
-		      request->size,
-		      request->geometry[X], 
-		      request->geometry[Y], 
-		      request->geometry[Z]);
+		if(geo[X] == (uint16_t)NO_VAL) {
+			error("Problems with request for size %d geo %dx%dx%d", 
+			      request->size,
+			      request->geometry[X], 
+			      request->geometry[Y], 
+			      request->geometry[Z]);
+		} else {
+			error("Problems with request for size %d.  "
+			      "No geo given.", 
+			      request->size);
+		}
 		rc = ESLURM_INTERCONNECT_FAILURE;
 		goto finished;
 	} 
@@ -251,16 +230,8 @@ extern List create_dynamic_block(List block_list,
 			   set in the ionode_bitmap.
 			*/
 			if(bg_record->job_running == NO_JOB_RUNNING 
-#ifdef HAVE_BGL
-			   && (bg_record->quarter == (uint16_t) NO_VAL
-			       || (bg_record->quarter == 0 
-				   && (bg_record->nodecard == (uint16_t) NO_VAL
-				       || bg_record->nodecard == 0)))
-#else
 			   && ((bg_record->node_cnt >= bluegene_bp_node_cnt)
-			       || (bit_ffs(bg_record->ionode_bitmap) == 0))
-#endif
-				) {
+			       || (bit_ffs(bg_record->ionode_bitmap) == 0))) {
 				
 				for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
 					request->start[i] = 
@@ -325,7 +296,7 @@ no_list:
 	blockreq.ramdiskimage = request->ramdiskimage;
 	blockreq.conn_type = request->conn_type;
 
-	add_bg_record(new_blocks, results, &blockreq);
+	add_bg_record(new_blocks, results, &blockreq, 0, 0);
 
 finished:
 	reset_all_removed_bps();
@@ -344,91 +315,14 @@ finished:
 	return new_blocks;
 }
 
-#ifdef HAVE_BGL
-extern bg_record_t *create_small_record(bg_record_t *bg_record, 
-					uint16_t quarter, uint16_t nodecard)
-{
-	bg_record_t *found_record = NULL;
-	int small_size = 4;
-	ba_node_t *new_ba_node = NULL;
-	ba_node_t *ba_node = NULL;
-	found_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
-				
-	found_record->job_running = NO_JOB_RUNNING;
-	found_record->user_name = xstrdup(bg_record->user_name);
-	found_record->user_uid = bg_record->user_uid;
-	found_record->bg_block_list = list_create(destroy_ba_node);
-	ba_node = list_peek(bg_record->bg_block_list);
-	if(!ba_node) {
-		hostlist_t hl = hostlist_create(bg_record->nodes);
-		char *host = hostlist_shift(hl);
-		hostlist_destroy(hl);
-		found_record->nodes = xstrdup(host);
-		free(host);
-		error("you gave me a list with no ba_nodes using %s", 
-		      found_record->nodes);
-	} else {
-		int i=0,j=0;
-		new_ba_node = ba_copy_node(ba_node);
-		for (i=0; i<BA_SYSTEM_DIMENSIONS; i++){
-			for(j=0;j<NUM_PORTS_PER_NODE;j++) {
-				ba_node->axis_switch[i].int_wire[j].used = 0;	
-				if(i!=X) {
-					if(j==3 || j==4) 
-						ba_node->axis_switch[i].
-							int_wire[j].
-							used = 1;	
-				}
-				ba_node->axis_switch[i].int_wire[j].
-					port_tar = j;
-			}
-		}
-		list_append(found_record->bg_block_list, new_ba_node);
-		found_record->bp_count = 1;
-		found_record->nodes = xstrdup_printf(
-			"%s%c%c%c", 
-			bg_slurm_node_prefix, 
-			alpha_num[ba_node->coord[X]],
-			alpha_num[ba_node->coord[Y]],
-			alpha_num[ba_node->coord[Z]]);
-	}
-
-	found_record->blrtsimage = xstrdup(bg_record->blrtsimage);
-	found_record->linuximage = xstrdup(bg_record->linuximage);
-	found_record->mloaderimage = xstrdup(bg_record->mloaderimage);
-	found_record->ramdiskimage = xstrdup(bg_record->ramdiskimage);
-
-	process_nodes(found_record, false);
-				
-	found_record->conn_type = SELECT_SMALL;
-				
-	found_record->node_use = SELECT_COPROCESSOR_MODE;
-
-	if(nodecard != (uint16_t) NO_VAL)
-		small_size = bluegene_bp_nodecard_cnt;
-	found_record->cpus_per_bp = procs_per_node/small_size;
-	found_record->node_cnt = bluegene_bp_node_cnt/small_size;
-	found_record->quarter = quarter; 
-	found_record->nodecard = nodecard;
-	
-	if(set_ionodes(found_record) == SLURM_ERROR) 
-		error("couldn't create ionode_bitmap for %d.%d",
-		      found_record->quarter, found_record->nodecard);
-	return found_record;
-}
-
-#else
 extern bg_record_t *create_small_record(bg_record_t *bg_record, 
 					bitstr_t *ionodes, int size)
 {
 	bg_record_t *found_record = NULL;
 	ba_node_t *new_ba_node = NULL;
 	ba_node_t *ba_node = NULL;
-#ifdef HAVE_BGL
-	int small_size = 4;
-#else
 	char bitstring[BITSIZE];
-#endif
+
 	found_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
 				
 	found_record->job_running = NO_JOB_RUNNING;
@@ -449,7 +343,7 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record,
 		new_ba_node = ba_copy_node(ba_node);
 		for (i=0; i<BA_SYSTEM_DIMENSIONS; i++){
 			for(j=0;j<NUM_PORTS_PER_NODE;j++) {
-				ba_node->axis_switch[i].int_wire[j].used = 0;	
+				ba_node->axis_switch[i].int_wire[j].used = 0;
 				if(i!=X) {
 					if(j==3 || j==4) 
 						ba_node->axis_switch[i].
@@ -470,6 +364,7 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record,
 			alpha_num[ba_node->coord[Z]]);
 	}
 #ifdef HAVE_BGL
+	found_record->node_use = SELECT_COPROCESSOR_MODE;
 	found_record->blrtsimage = xstrdup(bg_record->blrtsimage);
 #endif
 	found_record->linuximage = xstrdup(bg_record->linuximage);
@@ -480,348 +375,64 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record,
 				
 	found_record->conn_type = SELECT_SMALL;
 				
-#ifdef HAVE_BGL
-	found_record->node_use = SELECT_COPROCESSOR_MODE;
-	if(nodecard != (uint16_t) NO_VAL)
-		small_size = bluegene_bp_nodecard_cnt;
-	found_record->cpus_per_bp = procs_per_node/small_size;
-	found_record->node_cnt = bluegene_bp_node_cnt/small_size;
-	found_record->quarter = quarter; 
-	found_record->nodecard = nodecard;
-	
-	if(set_ionodes(found_record) == SLURM_ERROR) 
-		error("couldn't create ionode_bitmap for %d.%d",
-		      found_record->quarter, found_record->nodecard);
-#else
 	xassert(bluegene_proc_ratio);
-	found_record->cpus_per_bp = bluegene_proc_ratio * size;
+	found_record->cpu_cnt = bluegene_proc_ratio * size;
 	found_record->node_cnt = size;
 
 	found_record->ionode_bitmap = bit_copy(ionodes);
 	bit_fmt(bitstring, BITSIZE, found_record->ionode_bitmap);
 	found_record->ionodes = xstrdup(bitstring);
-#endif
+
 	return found_record;
 }
-#endif
 
 /*********************** Local Functions *************************/
 
-#ifdef HAVE_BGL
 static int _split_block(List block_list, List new_blocks,
-			bg_record_t *bg_record, int procs) 
+			bg_record_t *bg_record, int cnodes) 
 {
-	bg_record_t *found_record = NULL;
 	bool full_bp = false; 
-	int small_count = 0;
-	int small_size = 0;
-	uint16_t num_nodecard = 0, num_quarter = 0;
-	int i;
-	int node_cnt = 0;
-	uint16_t quarter = 0;
-	uint16_t nodecard = 0;
-
-	if(bg_record->quarter == (uint16_t) NO_VAL)
-		full_bp = true;
-	
-	if(procs == (procs_per_node/bluegene_bp_nodecard_cnt) 
-	   && bluegene_nodecard_ionode_cnt) {
-		num_nodecard=4;
-		if(full_bp)
-			num_quarter=3;
-	} else if(full_bp) {
-		num_quarter = 4;
-	} else {
-		error("you asked for something that was already this size");
-		return SLURM_ERROR;
-	}
-	debug2("asking for %d 32s from a %d block",
-	       num_nodecard, bg_record->node_cnt);
-	small_count = num_nodecard+num_quarter; 
-
-	/* break base partition up into 16 parts */
-	small_size = bluegene_bp_node_cnt/bluegene_nodecard_node_cnt;
-	node_cnt = 0;
-	if(!full_bp)
-		quarter = bg_record->quarter;
-	else
-		quarter = 0;
-	nodecard = 0;
-	for(i=0; i<small_count; i++) {
-		if(i == num_nodecard) {
-			/* break base partition up into 4 parts */
-			small_size = 4;
-		}
-		
-		if(small_size == 4)
-			nodecard = (uint16_t)NO_VAL;
-		else
-			nodecard = i%4; 
-		found_record = create_small_record(bg_record,
-						   quarter,
-						   nodecard);
-		list_append(new_blocks, found_record);
-				
-		node_cnt += bluegene_bp_node_cnt/small_size;
-		if(node_cnt == 128) {
-			node_cnt = 0;
-			quarter++;
-		}
-	}
-		
-	return SLURM_SUCCESS;
-}
-
-static int _breakup_blocks(List block_list, List new_blocks,
-			   ba_request_t *request, List my_block_list)
-{
-	int rc = SLURM_ERROR;
-	bg_record_t *bg_record = NULL;
-	ListIterator itr;
-	int proc_cnt=0;
-	int total_proc_cnt=0;
-	uint16_t last_quarter = (uint16_t) NO_VAL;
-	char tmp_char[256];
-	
-	debug2("proc count = %d size = %d",
-	       request->procs, request->size);
+	bitoff_t start = 0;
+	blockreq_t blockreq;
 	
-	itr = list_iterator_create(block_list);			
-	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
-		if(bg_record->job_running != NO_JOB_RUNNING)
-			continue;
-		if(bg_record->state != RM_PARTITION_FREE)
-			continue;
-		if (request->avail_node_bitmap &&
-		    !bit_super_set(bg_record->bitmap,
-				   request->avail_node_bitmap)) {
-			debug2("bg block %s has nodes not usable by this job",
-			       bg_record->bg_block_id);
-			continue;
-		}
+	memset(&blockreq, 0, sizeof(blockreq_t));
 
-		if(request->start_req) {
-			if ((request->start[X] != bg_record->start[X])
-			    || (request->start[Y] != bg_record->start[Y])
-			    || (request->start[Z] != bg_record->start[Z])) {
-				debug4("small got %c%c%c looking for %c%c%c",
-				       alpha_num[bg_record->start[X]],
-				       alpha_num[bg_record->start[Y]],
-				       alpha_num[bg_record->start[Z]],
-				       alpha_num[request->start[X]],
-				       alpha_num[request->start[Y]],
-				       alpha_num[request->start[Z]]);
-				continue;
-			}
-			debug3("small found %c%c%c looking for %c%c%c",
-			       alpha_num[bg_record->start[X]],
-			       alpha_num[bg_record->start[Y]],
-			       alpha_num[bg_record->start[Z]],
-			       alpha_num[request->start[X]],
-			       alpha_num[request->start[Y]],
-			       alpha_num[request->start[Z]]);
-		}
-		proc_cnt = bg_record->bp_count * 
-			bg_record->cpus_per_bp;
-		if(proc_cnt == request->procs) {
-			debug2("found it here %s, %s",
-			       bg_record->bg_block_id,
-			       bg_record->nodes);
-			request->save_name = xstrdup_printf(
-				"%c%c%c",
-				alpha_num[bg_record->start[X]],
-				alpha_num[bg_record->start[Y]],
-				alpha_num[bg_record->start[Z]]);
-			rc = SLURM_SUCCESS;
-			goto finished;
-		}
-		if(bg_record->node_cnt > bluegene_bp_node_cnt)
-			continue;
-		if(proc_cnt < request->procs) {
-			if(last_quarter != bg_record->quarter){
-				last_quarter = bg_record->quarter;
-				total_proc_cnt = proc_cnt;
-			} else {
-				total_proc_cnt += proc_cnt;
-			}
-			debug2("1 got %d on quarter %d",
-			       total_proc_cnt, last_quarter);
-			if(total_proc_cnt == request->procs) {
-				request->save_name = xstrdup_printf(
-					"%c%c%c",
-					alpha_num[bg_record->start[X]],
-					alpha_num[bg_record->start[Y]],
-					alpha_num[bg_record->start[Z]]);
-				if(!my_block_list) {
-					rc = SLURM_SUCCESS;
-					goto finished;	
-				}
-						
-				bg_record = create_small_record(
-					bg_record,
-					last_quarter,
-					(uint16_t) NO_VAL);
-				list_append(new_blocks, bg_record);
-							
-				rc = SLURM_SUCCESS;
-				goto finished;	
-			}
-			continue;
-		}
+	switch(bg_record->node_cnt) {
+#ifdef HAVE_BGL
+	case 32:
+		error("We got a 32 we should never have this");
+		goto finished;
 		break;
-	}
-	if(bg_record) {
-		debug2("got one on the first pass");
-		goto found_one;
-	}
-	list_iterator_reset(itr);
-	last_quarter = (uint16_t) NO_VAL;
-	while ((bg_record = (bg_record_t *) list_next(itr)) 
-	       != NULL) {
-		if(bg_record->job_running != NO_JOB_RUNNING)
-			continue;
-		if (request->avail_node_bitmap &&
-		    !bit_super_set(bg_record->bitmap,
-				   request->avail_node_bitmap)) {
-			debug2("bg block %s has nodes not usable by this job",
-			       bg_record->bg_block_id);
-			continue;
-		}
-
-		if(request->start_req) {
-			if ((request->start[X] != bg_record->start[X])
-			    || (request->start[Y] != bg_record->start[Y])
-			    || (request->start[Z] != bg_record->start[Z])) {
-				debug4("small 2 got %c%c%c looking for %c%c%c",
-				       alpha_num[bg_record->start[X]],
-				       alpha_num[bg_record->start[Y]],
-				       alpha_num[bg_record->start[Z]],
-				       alpha_num[request->start[X]],
-				       alpha_num[request->start[Y]],
-				       alpha_num[request->start[Z]]);
-				continue;
-			}
-			debug3("small 2 found %c%c%c looking for %c%c%c",
-			       alpha_num[bg_record->start[X]],
-			       alpha_num[bg_record->start[Y]],
-			       alpha_num[bg_record->start[Z]],
-			       alpha_num[request->start[X]],
-			       alpha_num[request->start[Y]],
-			       alpha_num[request->start[Z]]);
-		}
-				
-		proc_cnt = bg_record->bp_count * bg_record->cpus_per_bp;
-		if(proc_cnt == request->procs) {
-			debug2("found it here %s, %s",
-			       bg_record->bg_block_id,
-			       bg_record->nodes);
-			request->save_name = xstrdup_printf(
-				"%c%c%c",
-				alpha_num[bg_record->start[X]],
-				alpha_num[bg_record->start[Y]],
-				alpha_num[bg_record->start[Z]]);
-			rc = SLURM_SUCCESS;
+	case 128:
+		switch(cnodes) {
+		case 32:			
+			blockreq.small32 = 4;
+			break;
+		default:
+			error("We don't make a %d from size %d", 
+			      cnodes, bg_record->node_cnt);
 			goto finished;
-		} 
-
-		if(bg_record->node_cnt > bluegene_bp_node_cnt)
-			continue;
-		if(proc_cnt < request->procs) {
-			if(last_quarter != bg_record->quarter){
-				last_quarter = bg_record->quarter;
-				total_proc_cnt = proc_cnt;
-			} else {
-				total_proc_cnt += proc_cnt;
-			}
-			debug2("got %d on quarter %d",
-			       total_proc_cnt, last_quarter);
-			if(total_proc_cnt == request->procs) {
-				request->save_name = xstrdup_printf(
-					"%c%c%c",
-					alpha_num[bg_record->start[X]],
-					alpha_num[bg_record->start[Y]],
-					alpha_num[bg_record->start[Z]]);
-				if(!my_block_list) {
-					rc = SLURM_SUCCESS;
-					goto finished;	
-				}
-				bg_record = create_small_record(
-					bg_record,
-					last_quarter,
-					(uint16_t) NO_VAL);
-				list_append(new_blocks, bg_record);
-								
-				rc = SLURM_SUCCESS;
-				goto finished;	
-			}
-			continue;
-		}				
-		break;
-	}
-found_one:
-	if(bg_record) {
-		List temp_list = NULL;
-		bg_record_t *found_record = NULL;
-
-		if(bg_record->original) {
-			debug3("This was a copy");
-			found_record = bg_record->original;
-		} else {
-			debug3("looking for original");
-			found_record = find_org_in_bg_list(
-				bg_list, bg_record);
+			break;
 		}
-		if(!found_record) {
-			error("this record wasn't found in the list!");
-			rc = SLURM_ERROR;
+		break;
+	default:
+		switch(cnodes) {
+		case 32:			
+			blockreq.small32 = 4;
+			blockreq.small128 = 3;
+			break;
+		case 128:				
+			blockreq.small128 = 4;
+			break;
+		default:
+			error("We don't make a %d from size %d", 
+			      cnodes, bg_record->node_cnt);
 			goto finished;
+			break;
 		}
-		
-		format_node_name(found_record, tmp_char, sizeof(tmp_char));
-			
-		debug2("going to split %s, %s",
-		       found_record->bg_block_id,
-		       tmp_char);
-		request->save_name = xstrdup_printf(
-			"%c%c%c",
-			alpha_num[found_record->start[X]],
-			alpha_num[found_record->start[Y]],
-			alpha_num[found_record->start[Z]]);
-		if(!my_block_list) {
-			rc = SLURM_SUCCESS;
-			goto finished;	
-		}
-		_split_block(block_list, new_blocks,
-			     found_record, request->procs);
-		remove_from_bg_list(block_list, bg_record);
-		destroy_bg_record(bg_record);
-		remove_from_bg_list(bg_list, found_record);
-		temp_list = list_create(NULL);
-		list_push(temp_list, found_record);
-		num_block_to_free++;
-		free_block_list(temp_list);
-		list_destroy(temp_list);
-		rc = SLURM_SUCCESS;
-		goto finished;
-	}
-	
-finished:
-	list_iterator_destroy(itr);
-		
-	return rc;
-}
+		full_bp = true;
+		break;
 #else
-
-static int _split_block(List block_list, List new_blocks,
-			bg_record_t *bg_record, int cnodes) 
-{
-	bool full_bp = false; 
-	bitoff_t start = 0;
-	blockreq_t blockreq;
-	
-	memset(&blockreq, 0, sizeof(blockreq_t));
-
-	switch(bg_record->node_cnt) {
 	case 16:
 		error("We got a 16 we should never have this");
 		goto finished;
@@ -937,17 +548,27 @@ static int _split_block(List block_list, List new_blocks,
 		}
 		full_bp = true;
 		break;
+#endif
 	}
 
-	if(!full_bp && bg_record->ionode_bitmap)
-		start = bit_ffs(bg_record->ionode_bitmap);
+	if(!full_bp && bg_record->ionode_bitmap) {
+		if((start = bit_ffs(bg_record->ionode_bitmap)) == -1)
+			start = 0;		
+	}
 
+#ifdef HAVE_BGL
+	debug2("Asking for %u 32CNBlocks, and %u 128CNBlocks "
+	       "from a %u block, starting at ionode %d.", 
+	       blockreq.small32, blockreq.small128, 
+	       bg_record->node_cnt, start);
+#else
 	debug2("Asking for %u 16CNBlocks, %u 32CNBlocks, "
 	       "%u 64CNBlocks, %u 128CNBlocks, and %u 256CNBlocks"
 	       "from a %u block, starting at ionode %d.", 
 	       blockreq.small16, blockreq.small32, 
 	       blockreq.small64, blockreq.small128, 
 	       blockreq.small256, bg_record->node_cnt, start);
+#endif
 	handle_small_record_request(new_blocks, &blockreq, bg_record, start);
 
 finished:
@@ -997,7 +618,7 @@ static int _breakup_blocks(List block_list, List new_blocks,
 	 * smallest blocks.
 	 */
 again:		
-	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
+	while ((bg_record = list_next(itr))) {
 		if(bg_record->job_running != NO_JOB_RUNNING)
 			continue;
 		/* on the third time through look for just a block
@@ -1056,7 +677,7 @@ again:
 		}
 		/* lets see if we can combine some small ones */
 		if(bg_record->node_cnt < cnodes) {
-			//char bitstring[BITSIZE];
+			char bitstring[BITSIZE];
 			bitstr_t *bitstr = NULL;
 			bit_or(ionodes, bg_record->ionode_bitmap);
 
@@ -1076,9 +697,12 @@ again:
 			} else
 				total_cnode_cnt += bg_record->node_cnt;
 
-			//bit_fmt(bitstring, BITSIZE, ionodes);
-			debug2("1 adding %d got %d set",
-			       bg_record->node_cnt, total_cnode_cnt);
+			bit_fmt(bitstring, BITSIZE, ionodes);
+			debug2("1 adding %s %d got %d set "
+			       "ionodes %s total is %s",
+			       bg_record->bg_block_id, 
+			       bg_record->node_cnt, total_cnode_cnt,
+			       bg_record->ionodes, bitstring);
 			if(total_cnode_cnt == cnodes) {
 				request->save_name = xstrdup_printf(
 					"%c%c%c",
@@ -1168,5 +792,3 @@ finished:
 		
 	return rc;
 }
-
-#endif
diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.h b/src/plugins/select/bluegene/plugin/dynamic_block.h
index 06ad3d706..9e65a27de 100644
--- a/src/plugins/select/bluegene/plugin/dynamic_block.h
+++ b/src/plugins/select/bluegene/plugin/dynamic_block.h
@@ -42,9 +42,10 @@
 #include "bluegene.h"
 
 extern List create_dynamic_block(List block_list,
-				 ba_request_t *request, List my_block_list);
+				 ba_request_t *request, List my_block_list,
+				 bool track_down_nodes);
 
-#ifdef HAVE_BGL 
+#ifdef HAVE_BGQ 
 extern bg_record_t *create_small_record(bg_record_t *bg_record, 
 					uint16_t quarter, uint16_t nodecard);
 #else
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
index a1ef04406..16364b372 100644
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ b/src/plugins/select/bluegene/plugin/select_bluegene.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  select_bluegene.c - node selection plugin for Blue Gene system.
  * 
- *  $Id: select_bluegene.c 16146 2009-01-06 18:20:48Z da $
+ *  $Id: select_bluegene.c 17175 2009-04-07 17:24:20Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -85,12 +85,12 @@ const char plugin_type[]       	= "select/bluegene";
 const uint32_t plugin_version	= 100;
 
 /* pthread stuff for updating BG node status */
-static pthread_t bluegene_thread = 0;
+static pthread_t block_thread = 0;
+static pthread_t state_thread = 0;
 static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /** initialize the status pthread */
 static int _init_status_pthread(void);
-static int _wait_for_thread (pthread_t thread_id);
 static char *_block_state_str(int state);
 
 extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data);
@@ -108,22 +108,21 @@ extern int init ( void )
 	fatal("SYSTEM_DIMENSIONS value (%d) invalid for Blue Gene",
 		SYSTEM_DIMENSIONS);
 #endif
-#ifdef HAVE_BG_FILES
-	if (!getenv("CLASSPATH") || !getenv("DB2INSTANCE") 
-	||  !getenv("VWSPATH"))
-		fatal("db2profile has not been run to setup DB2 environment");
-
-	if ((SELECT_MESH  != RM_MESH)
-	||  (SELECT_TORUS != RM_TORUS)
-	||  (SELECT_NAV   != RM_NAV))
-		fatal("enum conn_type out of sync with rm_api.h");
 
+#ifdef HAVE_BG_FILES
 #ifdef HAVE_BGL
+	if (!getenv("CLASSPATH") || !getenv("DB2INSTANCE")
+	    || !getenv("VWSPATH"))
+		fatal("db2profile has not been run to setup DB2 environment");
+	
 	if ((SELECT_COPROCESSOR_MODE  != RM_PARTITION_COPROCESSOR_MODE)
-	||  (SELECT_VIRTUAL_NODE_MODE != RM_PARTITION_VIRTUAL_NODE_MODE))
+	    || (SELECT_VIRTUAL_NODE_MODE != RM_PARTITION_VIRTUAL_NODE_MODE))
 		fatal("enum node_use_type out of sync with rm_api.h");
 #endif
-	
+	if ((SELECT_MESH  != RM_MESH)
+	    || (SELECT_TORUS != RM_TORUS)
+	    || (SELECT_NAV   != RM_NAV))
+		fatal("enum conn_type out of sync with rm_api.h");
 #endif
 
 	verbose("%s loading...", plugin_name);
@@ -138,37 +137,29 @@ static int _init_status_pthread(void)
 	pthread_attr_t attr;
 
 	pthread_mutex_lock( &thread_flag_mutex );
-	if ( bluegene_thread ) {
-		debug2("Bluegene thread already running, not starting "
-			"another");
+	if ( block_thread ) {
+		debug2("Bluegene threads already running, not starting "
+		       "another");
 		pthread_mutex_unlock( &thread_flag_mutex );
 		return SLURM_ERROR;
 	}
 
 	slurm_attr_init( &attr );
-	pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_DETACHED );
-	if (pthread_create( &bluegene_thread, &attr, bluegene_agent, NULL)
+	/* since we do a join on this later we don't make it detached */
+	if (pthread_create( &block_thread, &attr, block_agent, NULL)
 	    != 0)
-		error("Failed to create bluegene_agent thread");
+		error("Failed to create block_agent thread");
+	slurm_attr_init( &attr );
+	/* since we do a join on this later we don't make it detached */
+	if (pthread_create( &state_thread, &attr, state_agent, NULL)
+	    != 0)
+		error("Failed to create state_agent thread");
 	pthread_mutex_unlock( &thread_flag_mutex );
 	slurm_attr_destroy( &attr );
 
 	return SLURM_SUCCESS;
 }
 
-static int _wait_for_thread (pthread_t thread_id)
-{
-	int i;
-
-	for (i=0; i<4; i++) {
-		if (pthread_kill(thread_id, 0))
-			return SLURM_SUCCESS;
-		sleep(1);
-	}
-	error("Could not kill select script pthread");
-	return SLURM_ERROR;
-}
-
 static char *_block_state_str(int state)
 {
 	static char tmp[16];
@@ -190,15 +181,18 @@ extern int fini ( void )
 {
 	int rc = SLURM_SUCCESS;
 
-	pthread_mutex_lock( &thread_flag_mutex );
 	agent_fini = true;
-	if ( bluegene_thread ) {
+	pthread_mutex_lock( &thread_flag_mutex );
+	if ( block_thread ) {
 		verbose("Bluegene select plugin shutting down");
-		rc = _wait_for_thread(bluegene_thread);
-		bluegene_thread = 0;
+		pthread_join(block_thread, NULL);
+		block_thread = 0;
+	}
+	if ( state_thread ) {
+		pthread_join(state_thread, NULL);
+		state_thread = 0;
 	}
 	pthread_mutex_unlock( &thread_flag_mutex );
-
 	fini_bg();
 
 	return rc;
@@ -370,7 +364,7 @@ extern int select_p_job_init(List job_list)
 extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 {
 	if(node_cnt>0)
-		if(node_ptr->cpus > 512)
+		if(node_ptr->cpus >= bluegene_bp_node_cnt)
 			procs_per_node = node_ptr->cpus;
 	return SLURM_SUCCESS;
 }
@@ -476,7 +470,7 @@ extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
 		debug2("Node select info hasn't changed since %d", 
 			last_bg_update);
 		return SLURM_NO_CHANGE_IN_DATA;
-	} else {
+	} else if(blocks_are_created) {
 		*buffer_ptr = NULL;
 		buffer = init_buf(HUGE_BUF_SIZE);
 		pack32(blocks_packed, buffer);
@@ -485,10 +479,7 @@ extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
 		if(bg_list) {
 			slurm_mutex_lock(&block_state_mutex);
 			itr = list_iterator_create(bg_list);
-			while ((bg_record = (bg_record_t *) list_next(itr)) 
-			       != NULL) {
-				xassert(bg_record->bg_block_id != NULL);
-				
+			while ((bg_record = list_next(itr))) {
 				pack_block(bg_record, buffer);
 				blocks_packed++;
 			}
@@ -521,6 +512,9 @@ extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
 		set_buf_offset(buffer, tmp_offset);
 		
 		*buffer_ptr = buffer;
+	} else {
+		error("select_p_pack_node_info: bg_list not ready yet");
+		return SLURM_ERROR;
 	}
 
 	return SLURM_SUCCESS;
@@ -623,23 +617,9 @@ extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
 	}
 
 	if(!part_desc_ptr->state_up) {
-		/* Since we are putting this block in an error state we need
-		   to wait for the job to be removed.  We don't really
-		   need to free the block though since we may just
-		   want it to be in an error state for some reason. */
-		while(bg_record->job_running > NO_JOB_RUNNING) 
-			sleep(1);
-		
-		slurm_mutex_lock(&block_state_mutex);
-		bg_record->job_running = BLOCK_ERROR_STATE;
-		bg_record->state = RM_PARTITION_ERROR;
-		slurm_mutex_unlock(&block_state_mutex);
-		trigger_block_error();
+		put_block_in_error_state(bg_record, BLOCK_ERROR_STATE);
 	} else if(part_desc_ptr->state_up){
-		slurm_mutex_lock(&block_state_mutex);
-		bg_record->job_running = NO_JOB_RUNNING;
-		bg_record->state = RM_PARTITION_FREE;
-		slurm_mutex_unlock(&block_state_mutex);
+		resume_block(bg_record);
 	} else {
 		return rc;
 	}
@@ -652,19 +632,12 @@ extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
 extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 {
 	int rc = SLURM_SUCCESS;
-	bg_record_t *bg_record = NULL, *found_record = NULL;
-	time_t now;
-	char reason[128], tmp[64], time_str[32];
-	blockreq_t blockreq; 
 	int i = 0, j = 0;
-	char coord[BA_SYSTEM_DIMENSIONS];
+	char coord[BA_SYSTEM_DIMENSIONS+1], *node_name = NULL;
 	char ionodes[128];
 	int set = 0;
-	int set_error = 0;
+	double nc_pos = 0, last_pos = -1;
 	bitstr_t *ionode_bitmap = NULL;
-	List requests = NULL;
-	List delete_list = NULL;
-	ListIterator itr;
 	
 	if(bluegene_layout_mode != LAYOUT_DYNAMIC) {
 		info("You can't use this call unless you are on a Dynamically "
@@ -673,19 +646,14 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 		goto end_it;
 	}
 
-	memset(coord, -1, BA_SYSTEM_DIMENSIONS);
+	memset(coord, 0, sizeof(coord));
 	memset(ionodes, 0, 128);
 	if(!part_desc_ptr->name) {
 		error("update_sub_node: No name specified");
 		rc = SLURM_ERROR;
 		goto end_it;
-				
 	}
 
-	now = time(NULL);
-	slurm_make_time_str(&now, time_str, sizeof(time_str));
-	snprintf(tmp, sizeof(tmp), "[SLURM@%s]", time_str);
-			
 	while (part_desc_ptr->name[j] != '\0') {
 		if (part_desc_ptr->name[j] == '[') {
 			if(set<1) {
@@ -736,9 +704,9 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 					goto end_it;
 				}
 			}
+			
 			strncpy(coord, part_desc_ptr->name+j,
 				BA_SYSTEM_DIMENSIONS); 
-			
 			j += BA_SYSTEM_DIMENSIONS-1;
 			set++;
 		}
@@ -752,165 +720,42 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 		goto end_it;
 	}
 	ionode_bitmap = bit_alloc(bluegene_numpsets);
-	bit_unfmt(ionode_bitmap, ionodes);		
-
-	requests = list_create(destroy_bg_record);
-	memset(&blockreq, 0, sizeof(blockreq_t));
-
-	blockreq.block = coord;
-	blockreq.conn_type = SELECT_SMALL;
-	blockreq.small32 = bluegene_bp_nodecard_cnt;
-
-	add_bg_record(requests, NULL, &blockreq);
-	
-	delete_list = list_create(NULL);
-	while((bg_record = list_pop(requests))) {
-		set_error = 0;
-		if(bit_overlap(bg_record->ionode_bitmap, ionode_bitmap))
-			set_error = 1;
-		
-		slurm_mutex_lock(&block_state_mutex);
-		itr = list_iterator_create(bg_list);
-		while((found_record = list_next(itr))) {
-			if(!found_record || (bg_record == found_record))
-				continue;
-			if(bit_equal(bg_record->bitmap, found_record->bitmap)
-			   && bit_equal(bg_record->ionode_bitmap, 
-					found_record->ionode_bitmap)) {
-				debug2("block %s[%s] already there",
-				       found_record->nodes, 
-				       found_record->ionodes);
-				/* we don't need to set this error, it
-				   doesn't overlap
-				*/
-				if(!set_error)
-					break;
-				
-				snprintf(reason, sizeof(reason),
-					 "update_sub_node: "
-					 "Admin set block %s state to %s %s",
-					 found_record->bg_block_id, 
-					 _block_state_str(
-						 part_desc_ptr->state_up),
-					 tmp); 
-				info("%s",reason);
-				if(found_record->job_running 
-				   > NO_JOB_RUNNING) {
-					slurm_fail_job(
-						found_record->job_running);
+	bit_unfmt(ionode_bitmap, ionodes);
+	if(bit_ffs(ionode_bitmap) == -1) {
+		error("update_sub_node: Invalid ionode '%s' given.", ionodes);
+		rc = SLURM_ERROR;
+		FREE_NULL_BITMAP(ionode_bitmap);
+		goto end_it;		
+	}
+	node_name = xstrdup_printf("%s%s", bg_slurm_node_prefix, coord);
+	/* find out how many nodecards to get for each ionode */
+	if(!part_desc_ptr->state_up) {
+		info("Admin setting %s[%s] in an error state",
+		     node_name, ionodes);
+		for(i = 0; i<bluegene_numpsets; i++) {
+			if(bit_test(ionode_bitmap, i)) {
+				if((int)nc_pos != (int)last_pos) {
+					down_nodecard(node_name, i);
+					last_pos = nc_pos;
 				}
-			
-				if(!part_desc_ptr->state_up) {
-					found_record->job_running =
-						BLOCK_ERROR_STATE;
-					found_record->state =
-						RM_PARTITION_ERROR;
-					trigger_block_error();
-				} else if(part_desc_ptr->state_up){
-					found_record->job_running =
-						NO_JOB_RUNNING;
-					found_record->state =
-						RM_PARTITION_FREE;
-				} else {
-					error("update_sub_node: "
-					      "Unknown state %d given",
-					      part_desc_ptr->state_up);
-					rc = SLURM_ERROR;
-					break;
-				}	
-				break;
-			} else if(!set_error
-				  && bit_equal(bg_record->bitmap,
-					       found_record->bitmap)
-				  && bit_overlap(
-					  bg_record->ionode_bitmap, 
-					  found_record->ionode_bitmap)) {
-				break;
 			}
-			
-		}
-		list_iterator_destroy(itr);
-		slurm_mutex_unlock(&block_state_mutex);
-		/* we already found an existing record */
-		if(found_record) {
-			destroy_bg_record(bg_record);
-			continue;
-		}
-		/* we need to add this record since it doesn't exist */
-		if(configure_block(bg_record) == SLURM_ERROR) {
-			destroy_bg_record(bg_record);
-			error("update_sub_node: "
-			      "unable to configure block in api");
-		}
-		debug2("adding block %s to fill in small blocks "
-		       "around bad blocks",
-		       bg_record->bg_block_id);
-		print_bg_record(bg_record);
-		slurm_mutex_lock(&block_state_mutex);
-		list_append(bg_list, bg_record);
-		slurm_mutex_unlock(&block_state_mutex);
-		
-		/* We are just adding the block not deleting any or
-		   setting this one to an error state.
-		*/
-		if(!set_error)
-			continue;
-				
-		if(!part_desc_ptr->state_up) {
-			bg_record->job_running = BLOCK_ERROR_STATE;
-			bg_record->state = RM_PARTITION_ERROR;
-			trigger_block_error();
-		} else if(part_desc_ptr->state_up){
-			bg_record->job_running = NO_JOB_RUNNING;
-			bg_record->state = RM_PARTITION_FREE;
-		} else {
-			error("update_sub_node: Unknown state %d given",
-			      part_desc_ptr->state_up);
-			rc = SLURM_ERROR;
-			continue;
+			nc_pos += bluegene_nc_ratio;
 		}
-		snprintf(reason, sizeof(reason),
-			 "update_sub_node: "
-			 "Admin set block %s state to %s %s",
-			 bg_record->bg_block_id, 
-			 _block_state_str(part_desc_ptr->state_up),
-			 tmp); 
-		info("%s",reason);
-				
-		/* remove overlapping blocks */
-		slurm_mutex_lock(&block_state_mutex);
-		itr = list_iterator_create(bg_list);
-		while((found_record = list_next(itr))) {
-			if ((!found_record) || (bg_record == found_record))
-				continue;
-			if(!blocks_overlap(bg_record, found_record)) {
-				debug2("block %s isn't part of %s",
-				       found_record->bg_block_id, 
-				       bg_record->bg_block_id);
-				continue;
-			}
-			debug2("removing block %s because there is something "
-			       "wrong with part of the base partition",
-			       found_record->bg_block_id);
-			if(found_record->job_running > NO_JOB_RUNNING) {
-				slurm_fail_job(found_record->job_running);
-			}
-			list_push(delete_list, found_record);
-			list_remove(itr);
-			num_block_to_free++;
-		}		
-		list_iterator_destroy(itr);
-		free_block_list(delete_list);
-		slurm_mutex_unlock(&block_state_mutex);		
+	} else if(part_desc_ptr->state_up){
+		info("Admin setting %s[%s] in an free state",
+		     node_name, ionodes);
+		up_nodecard(node_name, ionode_bitmap);
+	} else {
+		error("update_sub_node: Unknown state %d", 
+		      part_desc_ptr->state_up);
+		rc = SLURM_ERROR;
 	}
-	list_destroy(delete_list);
+	
 	FREE_NULL_BITMAP(ionode_bitmap);
-		
-	/* This only works for the error state, not free */
+	xfree(node_name);
 	
 	last_bg_update = time(NULL);
-	
-end_it:	
+end_it:
 	return rc;
 }
 
diff --git a/src/plugins/select/bluegene/plugin/sfree.c b/src/plugins/select/bluegene/plugin/sfree.c
index b93611723..36db832cf 100644
--- a/src/plugins/select/bluegene/plugin/sfree.c
+++ b/src/plugins/select/bluegene/plugin/sfree.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  sfree.c - free specified block or all blocks.
- *  $Id: sfree.c 15597 2008-11-04 22:05:21Z da $
+ *  $Id: sfree.c 16357 2009-01-30 18:05:07Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -284,7 +284,8 @@ static int _free_block(delete_record_t *delete_record)
 		if (delete_record->state != (rm_partition_state_t)NO_VAL
 		    && delete_record->state != RM_PARTITION_FREE 
 		    && delete_record->state != RM_PARTITION_DEALLOCATING) {
-			info("bridge_destroy %s",delete_record->bg_block_id);
+			info("bridge_destroy %s", delete_record->bg_block_id);
+#ifdef HAVE_BG_FILES
 			if ((rc = bridge_destroy_block(
 				     delete_record->bg_block_id))
 			    != STATUS_OK) {
@@ -296,6 +297,9 @@ static int _free_block(delete_record_t *delete_record)
 				      delete_record->bg_block_id,
 				      _bg_err_str(rc));
 			}
+#else
+			bg_record->state = RM_PARTITION_FREE;	
+#endif
 		}
 		
 		if(!wait_full) {
@@ -306,8 +310,12 @@ static int _free_block(delete_record_t *delete_record)
 		}
 
 		if ((delete_record->state == RM_PARTITION_FREE)
-		    ||  (delete_record->state == RM_PARTITION_ERROR))
+#ifdef HAVE_BGL
+		    ||  (delete_record->state == RM_PARTITION_ERROR)
+#endif
+			) {
 			break;
+		}
 		sleep(3);
 	}
 	info("bgblock %s is freed", delete_record->bg_block_id);
diff --git a/src/plugins/select/bluegene/plugin/slurm_prolog.c b/src/plugins/select/bluegene/plugin/slurm_prolog.c
index f4ad1d020..07f27e96e 100644
--- a/src/plugins/select/bluegene/plugin/slurm_prolog.c
+++ b/src/plugins/select/bluegene/plugin/slurm_prolog.c
@@ -125,8 +125,11 @@ static int _wait_part_ready(uint32_t job_id)
 			break;				/* fatal error */
 		if (rc == READY_JOB_ERROR)		/* error */
 			continue;			/* retry */
-		if ((rc & READY_JOB_STATE) == 0)	/* job killed */
+		if ((rc & READY_JOB_STATE) == 0) {	/* job killed */
+			/* return 1 so we don't get a prolog error */
+			is_ready = 1;
 			break;
+		}
 		if (rc & READY_NODE_STATE) {		/* job and node ready */
 			is_ready = 1;
 			break;
diff --git a/src/plugins/select/bluegene/plugin/state_test.c b/src/plugins/select/bluegene/plugin/state_test.c
index aefd147e1..0d32e0e2c 100644
--- a/src/plugins/select/bluegene/plugin/state_test.c
+++ b/src/plugins/select/bluegene/plugin/state_test.c
@@ -2,7 +2,7 @@
  *  state_test.c - Test state of Bluegene base partitions and switches. 
  *  DRAIN nodes in SLURM that are not usable. 
  *
- *  $Id: state_test.c 15611 2008-11-05 23:28:45Z da $
+ *  $Id: state_test.c 17202 2009-04-09 16:56:23Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -55,7 +55,7 @@
 #ifdef HAVE_BG_FILES
 
 /* Find the specified BlueGene node ID and drain it from SLURM */
-static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *bg)
+static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
 {
 	int bp_num, i, rc;
 	rm_bp_id_t bpid;
@@ -65,21 +65,21 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *bg)
 	char bg_down_node[128], reason[128], time_str[32];
 	time_t now = time(NULL);
 
-	if ((rc = bridge_get_data(bg, RM_BPNum, &bp_num)) != STATUS_OK) {
+	if ((rc = bridge_get_data(my_bg, RM_BPNum, &bp_num)) != STATUS_OK) {
 		error("bridge_get_data(RM_BPNum): %s", bg_err_str(rc));
 		bp_num = 0;
 	}
 
 	for (i=0; i<bp_num; i++) {
 		if (i) {
-			if ((rc = bridge_get_data(bg, RM_NextBP, &my_bp)) 
+			if ((rc = bridge_get_data(my_bg, RM_NextBP, &my_bp)) 
 			    != STATUS_OK) {
 				error("bridge_get_data(RM_NextBP): %s", 
 				      bg_err_str(rc));
 				continue;
 			}
 		} else {
-			if ((rc = bridge_get_data(bg, RM_FirstBP, &my_bp)) 
+			if ((rc = bridge_get_data(my_bg, RM_FirstBP, &my_bp)) 
 			    != 
 			    STATUS_OK) {
 				error("bridge_get_data(RM_FirstBP): %s", 
@@ -139,56 +139,220 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *bg)
 	}
 }
 
-/* Convert base partition state value to a string */
-static char *_convert_bp_state(rm_BP_state_t state)
+static int _test_down_nodecards(rm_BP_t *bp_ptr)
 {
-	switch(state) { 
-	case RM_BP_UP:
-		return "RM_BP_UP";
-		break;
-	case RM_BP_DOWN:
-		return "RM_BP_DOWN";
-		break;
-	case RM_BP_MISSING:
-		return "RM_BP_MISSING";
-		break;
-	case RM_BP_ERROR:
-		return "RM_BP_ERROR";
-		break;
-	case RM_BP_NAV:
-		return "RM_BP_NAV";
+	rm_bp_id_t bp_id = NULL;
+	rm_nodecard_id_t nc_name = NULL;
+	int num = 0;
+	int i=0;
+	int rc = SLURM_SUCCESS;
+	rm_nodecard_list_t *ncard_list = NULL;
+	rm_nodecard_t *ncard = NULL;
+	rm_nodecard_state_t state;
+	//bitstr_t *ionode_bitmap = NULL;
+	//bg_record_t *bg_record = NULL;
+	int *coord = NULL;
+	char *node_name = NULL;
+	//int bp_bit = 0;
+	//int io_cnt = 1;
+
+	/* Translate 1 nodecard count to ionode count */
+/* 	if((io_cnt *= bluegene_io_ratio)) */
+/* 		io_cnt--; */
+
+	if ((rc = bridge_get_data(bp_ptr, RM_BPID, &bp_id))
+	    != STATUS_OK) {
+		error("bridge_get_data(RM_BPID): %s",
+		      bg_err_str(rc));
+		return SLURM_ERROR;
+	}
+
+	if ((rc = bridge_get_nodecards(bp_id, &ncard_list))
+	    != STATUS_OK) {
+		error("bridge_get_nodecards(%s): %d",
+		      bp_id, rc);
+		rc = SLURM_ERROR;
+		goto clean_up;
+	}
+
+	coord = find_bp_loc(bp_id);
+	if(!coord) {
+		error("Could not find coordinates for "
+		      "BP ID %s", (char *) bp_id);
+		rc = SLURM_ERROR;
+		goto clean_up;
+	}
+	
+	node_name = xstrdup_printf("%s%c%c%c",
+				   bg_slurm_node_prefix,
+				   alpha_num[coord[X]], 
+				   alpha_num[coord[Y]],
+				   alpha_num[coord[Z]]);
+
+	if((rc = bridge_get_data(ncard_list, RM_NodeCardListSize, &num))
+	   != STATUS_OK) {
+		error("bridge_get_data(RM_NodeCardListSize): %s", 
+		      bg_err_str(rc));
+		rc = SLURM_ERROR;
+		goto clean_up;
+	}
+	
+	for(i=0; i<num; i++) {
+		int io_start = 0;
+
+		if (i) {
+			if ((rc = bridge_get_data(ncard_list, 
+						  RM_NodeCardListNext, 
+						  &ncard)) != STATUS_OK) {
+				error("bridge_get_data"
+				      "(RM_NodeCardListNext): %s",
+				      rc);
+				rc = SLURM_ERROR;
+				goto clean_up;
+			}
+		} else {
+			if ((rc = bridge_get_data(ncard_list, 
+						  RM_NodeCardListFirst, 
+						  &ncard)) != STATUS_OK) {
+				error("bridge_get_data"
+				      "(RM_NodeCardListFirst: %s",
+				      rc);
+				rc = SLURM_ERROR;
+				goto clean_up;
+			}
+		}
+		if ((rc = bridge_get_data(ncard, 
+					  RM_NodeCardState, 
+					  &state)) != STATUS_OK) {
+			error("bridge_get_data(RM_NodeCardState: %s",
+			      rc);
+			rc = SLURM_ERROR;
+			goto clean_up;
+		}
+
+		if(state == RM_NODECARD_UP) 
+			continue;
+
+		if ((rc = bridge_get_data(ncard, 
+					  RM_NodeCardID, 
+					  &nc_name)) != STATUS_OK) {
+			error("bridge_get_data(RM_NodeCardID): %d",rc);
+			rc = SLURM_ERROR;
+			goto clean_up;
+		}
+		
+		if(!nc_name) {
+			rc = SLURM_ERROR;
+			goto clean_up;
+		}
+
+#ifdef HAVE_BGL
+		if ((rc = bridge_get_data(ncard, 
+					  RM_NodeCardQuarter, 
+					  &io_start)) != STATUS_OK) {
+			error("bridge_get_data(CardQuarter): %d",rc);
+			goto clean_up;
+		}
+		io_start *= bluegene_quarter_ionode_cnt;
+		io_start += bluegene_nodecard_ionode_cnt * (i%4);
+#else
+		/* From the first nodecard id we can figure
+		   out where to start from with the alloc of ionodes.
+		*/
+		io_start = atoi((char*)nc_name+1);
+		io_start *= bluegene_io_ratio;
+#endif
+
+/* 		if(!ionode_bitmap)  */
+/* 			ionode_bitmap = bit_alloc(bluegene_numpsets); */
+/* 		info("setting %d-%d of %d", */
+/* 		     io_start, io_start+io_cnt, bluegene_numpsets); */
+/* 		bit_nset(ionode_bitmap, io_start, io_start+io_cnt); */
+		/* we have to handle each nodecard separately to make
+		   sure we don't create holes in the system */
+		if(down_nodecard(node_name, io_start) == SLURM_SUCCESS) {
+			debug("nodecard %s on %s is in an error state",
+			      nc_name, node_name);
+		}
+		free(nc_name);
 	}
-	return "BP_STATE_UNIDENTIFIED!";
+
+	/* this code is here to bring up a block after it is in an
+	   error state.  It is commented out because it hasn't been
+	   tested very well yet.  If you ever want to use this code
+	   there should probably be a configurable option in the
+	   bluegene.conf file that gives you an option as to have this
+	   happen or not automatically.
+	*/
+/* 	if(ionode_bitmap) { */
+/* 		info("got ionode_bitmap"); */
+		
+/* 		bit_not(ionode_bitmap); */
+/* 		up_nodecard(node_name, ionode_bitmap); */
+/* 	} else { */
+/* 		int ret = 0; */
+/* 		info("no ionode_bitmap"); */
+/* 		ListIterator itr = NULL; */
+/* 		slurm_mutex_lock(&block_state_mutex); */
+/* 		itr = list_iterator_create(bg_list); */
+/* 		while ((bg_record = list_next(itr))) { */
+/* 			if(bg_record->job_running != BLOCK_ERROR_STATE) */
+/* 				continue; */
+			
+/* 			if(!bit_test(bg_record->bitmap, bp_bit)) */
+/* 				continue; */
+/* 			info("bringing %s back to service", */
+/* 			     bg_record->bg_block_id); */
+/* 			bg_record->job_running = NO_JOB_RUNNING; */
+/* 			bg_record->state = RM_PARTITION_FREE; */
+/* 			last_bg_update = time(NULL); */
+/* 		} */
+/* 		list_iterator_destroy(itr); */
+/* 		slurm_mutex_unlock(&block_state_mutex); */
+		
+/* 		/\* FIX ME: This needs to call the opposite of */
+/* 		   slurm_drain_nodes which does not yet exist. */
+/* 		*\/ */
+/* 		if((ret = node_already_down(node_name))) { */
+/* 			/\* means it was drained *\/ */
+/* 			if(ret == 2) { */
+/* 				/\* debug("node %s put back into service after " *\/ */
+/* /\* 				      "being in an error state", *\/ */
+/* /\* 				      node_name); *\/ */
+/* 			} */
+/* 		} */
+/* 	} */
+	
+clean_up:
+	xfree(node_name);
+/* 	if(ionode_bitmap) */
+/* 		FREE_NULL_BITMAP(ionode_bitmap); */
+	free(bp_id);
+	
+	return rc;
 }
 
 /* Test for nodes that are not UP in MMCS and DRAIN them in SLURM */ 
-static void _test_down_nodes(my_bluegene_t *bg)
+static void _test_down_nodes(my_bluegene_t *my_bg)
 {
 	int bp_num, i, rc;
 	rm_BP_t *my_bp;
-	rm_BP_state_t bp_state;
-	rm_location_t bp_loc;
-	char down_node_list[BUFSIZE];
-	char bg_down_node[128];
-	char reason[128], time_str[32];
-	time_t now = time(NULL);
 		
 	debug2("Running _test_down_nodes");
-	down_node_list[0] = '\0';
-	if ((rc = bridge_get_data(bg, RM_BPNum, &bp_num)) != STATUS_OK) {
+	if ((rc = bridge_get_data(my_bg, RM_BPNum, &bp_num)) != STATUS_OK) {
 		error("bridge_get_data(RM_BPNum): %s", bg_err_str(rc));
 		bp_num = 0;
 	}
 	for (i=0; i<bp_num; i++) {
 		if (i) {
-			if ((rc = bridge_get_data(bg, RM_NextBP, &my_bp)) 
+			if ((rc = bridge_get_data(my_bg, RM_NextBP, &my_bp)) 
 			    != STATUS_OK) {
 				error("bridge_get_data(RM_NextBP): %s", 
 				      bg_err_str(rc));
 				continue;
 			}
 		} else {
-			if ((rc = bridge_get_data(bg, RM_FirstBP, &my_bp)) 
+			if ((rc = bridge_get_data(my_bg, RM_FirstBP, &my_bp)) 
 			    != STATUS_OK) {
 				error("bridge_get_data(RM_FirstBP): %s", 
 				      bg_err_str(rc));
@@ -196,57 +360,13 @@ static void _test_down_nodes(my_bluegene_t *bg)
 			}
 		}
 
-		if ((rc = bridge_get_data(my_bp, RM_BPState, &bp_state)) 
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_BPState): %s", 
-			      bg_err_str(rc));
-			continue;
-		}
-		
-		if  (bp_state == RM_BP_UP)
-			continue;
-		
-		if ((rc = bridge_get_data(my_bp, RM_BPLoc, &bp_loc)) 
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_BPLoc): %s", bg_err_str(rc));
-			continue;
-		}
-
-		
-		snprintf(bg_down_node, sizeof(bg_down_node), "%s%c%c%c", 
-			 bg_slurm_node_prefix,
-			 alpha_num[bp_loc.X], alpha_num[bp_loc.Y],
-			 alpha_num[bp_loc.Z]);
-		
-	
-		if (node_already_down(bg_down_node))
-			continue;
-
-		debug("_test_down_nodes: %s in state %s", 
-		      bg_down_node, _convert_bp_state(bp_state));
-		
-		if ((strlen(down_node_list) + strlen(bg_down_node) 
-		     + 2) 
-		    < BUFSIZE) {
-			if (down_node_list[0] != '\0')
-				strcat(down_node_list,",");
-			strcat(down_node_list, bg_down_node);
-		} else
-			error("down_node_list overflow");
+		_test_down_nodecards(my_bp);
 	}
-	if (down_node_list[0]) {
-		slurm_make_time_str(&now, time_str, sizeof(time_str));
-		snprintf(reason, sizeof(reason), 
-			 "select_bluegene: MMCS state not UP [SLURM@%s]", 
-			 time_str); 
-		slurm_drain_nodes(down_node_list, reason);
-	}
-	
 }
 
 /* Test for switches that are not UP in MMCS, 
  * when found DRAIN them in SLURM and configure their base partition DOWN */
-static void _test_down_switches(my_bluegene_t *bg)
+static void _test_down_switches(my_bluegene_t *my_bg)
 {
 	int switch_num, i, rc;
 	rm_switch_t *my_switch;
@@ -254,14 +374,14 @@ static void _test_down_switches(my_bluegene_t *bg)
 	rm_switch_state_t switch_state;
 
 	debug2("Running _test_down_switches");
-	if ((rc = bridge_get_data(bg, RM_SwitchNum, &switch_num)) 
+	if ((rc = bridge_get_data(my_bg, RM_SwitchNum, &switch_num)) 
 	    != STATUS_OK) {
 		error("bridge_get_data(RM_SwitchNum): %s", bg_err_str(rc));
 		switch_num = 0;
 	}
 	for (i=0; i<switch_num; i++) {
 		if (i) {
-			if ((rc = bridge_get_data(bg, RM_NextSwitch, 
+			if ((rc = bridge_get_data(my_bg, RM_NextSwitch, 
 						  &my_switch))
 			    != STATUS_OK) {
 				error("bridge_get_data(RM_NextSwitch): %s", 
@@ -269,7 +389,7 @@ static void _test_down_switches(my_bluegene_t *bg)
 				continue;
 			}
 		} else {
-			if ((rc = bridge_get_data(bg, RM_FirstSwitch, 
+			if ((rc = bridge_get_data(my_bg, RM_FirstSwitch, 
 						  &my_switch))
 			    != STATUS_OK) {
 				error("bridge_get_data(RM_FirstSwitch): %s",
@@ -298,14 +418,14 @@ static void _test_down_switches(my_bluegene_t *bg)
 			continue;
 		}
 
-		_configure_node_down(bp_id, bg);
+		_configure_node_down(bp_id, my_bg);
 		free(bp_id);
 	}
 }
 #endif
 
 /* Determine if specific slurm node is already in DOWN or DRAIN state */
-extern bool node_already_down(char *node_name)
+extern int node_already_down(char *node_name)
 {
 	uint16_t base_state;
 	struct node_record *node_ptr = find_node_record(node_name);
@@ -313,14 +433,16 @@ extern bool node_already_down(char *node_name)
 	if (node_ptr) {
 		base_state = node_ptr->node_state & 
 			(~NODE_STATE_NO_RESPOND);
-		if ((base_state == NODE_STATE_DOWN)
-		    ||  (base_state == NODE_STATE_DRAIN))
-			return true;
+
+		if(base_state & NODE_STATE_DRAIN)
+			return 2;
+		else if (base_state == NODE_STATE_DOWN)
+			return 1;
 		else
-			return false;
+			return 0;
 	}
 
-	return false;
+	return 0;
 }
 
 /* 
@@ -331,19 +453,19 @@ extern bool node_already_down(char *node_name)
 extern void test_mmcs_failures(void)
 {
 #ifdef HAVE_BG_FILES
-	my_bluegene_t *bg;
+	my_bluegene_t *local_bg;
 	int rc;
 
-	if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
+	if ((rc = bridge_get_bg(&local_bg)) != STATUS_OK) {
 		
 		error("bridge_get_BG(): %s", bg_err_str(rc));
 		return;
 	}
 	
 			
-	_test_down_switches(bg);
-	_test_down_nodes(bg);
-	if ((rc = bridge_free_bg(bg)) != STATUS_OK)
+	_test_down_switches(local_bg);
+	_test_down_nodes(local_bg);
+	if ((rc = bridge_free_bg(local_bg)) != STATUS_OK)
 		error("bridge_free_BG(): %s", bg_err_str(rc));
 #endif
 }
@@ -354,16 +476,8 @@ extern int check_block_bp_states(char *bg_block_id)
 #ifdef HAVE_BG_FILES
 	rm_partition_t *block_ptr = NULL;
 	rm_BP_t *bp_ptr = NULL;
-	char *bpid = NULL;
 	int bp_cnt = 0;
 	int i = 0;
-	int *coord = NULL;
-	rm_BP_state_t bp_state;
-	char bg_down_node[128], reason[128], time_str[32];
-	char down_node_list[BUFSIZE];
-	time_t now = time(NULL);
-	
-	down_node_list[0] = '\0';
 	
 	if ((rc = bridge_get_block(bg_block_id, &block_ptr)) != STATUS_OK) {
 		error("Block %s doesn't exist.", bg_block_id);
@@ -402,60 +516,13 @@ extern int check_block_bp_states(char *bg_block_id)
 				break;
 			}	
 		}
-		if ((rc = bridge_get_data(bp_ptr, RM_BPState, &bp_state))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_BPLoc): %s",
-			      bg_err_str(rc));
-			rc = SLURM_ERROR;
-			break;
-		}
-		if(bp_state == RM_BP_UP)
-			continue;
-		rc = SLURM_ERROR;
-		if ((rc = bridge_get_data(bp_ptr, RM_BPID, &bpid))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_BPID): %s",
-			      bg_err_str(rc));
-			break;
-		}
-		coord = find_bp_loc(bpid);
-		
-		if(!coord) {
-			fatal("Could not find coordinates for "
-			      "BP ID %s", (char *) bpid);
-		}
-		free(bpid);
-		
-		snprintf(bg_down_node, sizeof(bg_down_node), "%s%c%c%c", 
-			 bg_slurm_node_prefix,
-			 alpha_num[coord[X]], alpha_num[coord[Y]],
-			 alpha_num[coord[Z]]);
-		
-	
-		if (node_already_down(bg_down_node))
-			continue;
 
-		debug("check_block_bp_states: %s in state %s", 
-		      bg_down_node, _convert_bp_state(bp_state));
-		if ((strlen(down_node_list) + strlen(bg_down_node) + 2) 
-		    < BUFSIZE) {
-			if (down_node_list[0] != '\0')
-				strcat(down_node_list,",");
-			strcat(down_node_list, bg_down_node);
-		} else
-			error("down_node_list overflow");
+		_test_down_nodecards(bp_ptr);
 	}
 	
 cleanup:
 	bridge_free_block(block_ptr);
 done:
-	if (down_node_list[0]) {
-		slurm_make_time_str(&now, time_str, sizeof(time_str));
-		snprintf(reason, sizeof(reason), 
-			 "select_bluegene: MMCS state not UP [SLURM@%s]", 
-			 time_str); 
-		slurm_drain_nodes(down_node_list, reason);
-	}
 #endif
 	return rc;
 
diff --git a/src/plugins/select/bluegene/plugin/state_test.h b/src/plugins/select/bluegene/plugin/state_test.h
index f429b3760..b6a96d99f 100644
--- a/src/plugins/select/bluegene/plugin/state_test.h
+++ b/src/plugins/select/bluegene/plugin/state_test.h
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  state_test.h - header for Blue Gene node and switch state test. 
- *  $Id: state_test.h 9170 2006-09-05 18:00:19Z jette $
+ *  $Id: state_test.h 17102 2009-03-31 23:23:01Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -38,8 +38,9 @@
 #ifndef _STATE_TEST_H_
 #define _STATE_TEST_H_
 
-/* Determine if specific slurm node is already in DOWN or DRAIN state */
-extern bool node_already_down(char *node_name);
+/* Determine if specific slurm node is already in DOWN or DRAIN ret (1) or
+ * FAIL ret (2) state idle ret (0) */
+extern int node_already_down(char *node_name);
 
 /*
  * Search MMCS for failed switches and nodes. Failed resources are DRAINED in 
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index e1b8d6c8d..7c6d07b26 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -2,7 +2,7 @@
  *  select_cons_res.c - node selection plugin supporting consumable 
  *  resources policies.
  *
- *  $Id: select_cons_res.c 15841 2008-12-05 00:45:29Z jette $
+ *  $Id: select_cons_res.c 17022 2009-03-25 18:42:18Z jette $
  *****************************************************************************\
  *
  *  The following example below illustrates how four jobs are allocated
@@ -1910,11 +1910,9 @@ static int _select_nodes(struct job_record *job_ptr, bitstr_t * bitmap,
 		}
 	}
 
-	/* allocated node count should never exceed num_procs, right? 
-	 * if so, then this should be done earlier and max_nodes
-	 * could be used to make this process more efficient (truncate
-	 * # of available nodes when (# of idle nodes == max_nodes)*/
-	if (max_nodes > job_ptr->num_procs)
+	/* NOTE: num_procs is 1 by default.
+	 * Only reset max_nodes if user explicitly sets a process count */
+	if ((job_ptr->num_procs > 1) && (max_nodes > job_ptr->num_procs))
 		max_nodes = job_ptr->num_procs;
 
 	origmap = bit_copy(bitmap);
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index 8c5f3e421..7791bd222 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -661,20 +661,34 @@ static int _find_job_mate(struct job_record *job_ptr, bitstr_t *bitmap,
 {
 	ListIterator job_iterator;
 	struct job_record *job_scan_ptr;
+	int rc = EINVAL;
 
 	job_iterator = list_iterator_create(job_list);
 	while ((job_scan_ptr = (struct job_record *) list_next(job_iterator))) {
-		if ((job_scan_ptr->part_ptr == job_ptr->part_ptr) &&
-		    (job_scan_ptr->job_state == JOB_RUNNING) &&
-		    (job_scan_ptr->node_cnt == req_nodes) &&
-		    (job_scan_ptr->total_procs >= job_ptr->num_procs) &&
-		    bit_super_set(job_scan_ptr->node_bitmap, bitmap)) {
-			bit_and(bitmap, job_scan_ptr->node_bitmap);
-			return SLURM_SUCCESS;
-		}
+		if ((job_scan_ptr->part_ptr  != job_ptr->part_ptr) ||
+		    (job_scan_ptr->job_state != JOB_RUNNING) ||
+		    (job_scan_ptr->node_cnt  != req_nodes) ||
+		    (job_scan_ptr->total_procs < job_ptr->num_procs) ||
+		    (!bit_super_set(job_scan_ptr->node_bitmap, bitmap)))
+			continue;
+
+		if (job_ptr->details->req_node_bitmap &&
+		    (!bit_super_set(job_ptr->details->req_node_bitmap,
+				    job_scan_ptr->node_bitmap)))
+			continue;	/* Required nodes missing from job */
+
+		if (job_ptr->details->exc_node_bitmap &&
+		    (bit_overlap(job_ptr->details->exc_node_bitmap,
+				 job_scan_ptr->node_bitmap) != 0))
+			continue;	/* Excluded nodes in this job */
+
+		bit_and(bitmap, job_scan_ptr->node_bitmap);
+		job_ptr->total_procs = job_scan_ptr->total_procs;
+		rc = SLURM_SUCCESS;
+		break;
 	}
 	list_iterator_destroy(job_iterator);
-	return EINVAL;
+	return rc;
 }
 
 /* _job_test - does most of the real work for select_p_job_test(), which 
diff --git a/src/sacct/options.c b/src/sacct/options.c
index 9af15153c..841834678 100644
--- a/src/sacct/options.c
+++ b/src/sacct/options.c
@@ -408,13 +408,15 @@ void _help_msg(void)
 	       "    Display job accounting data for all users. By default, only\n"
 	       "    data for the current user is displayed for users other than\n"
 	       "    root.\n"
+	       "-A, --accounts\n"
+	       "    Only send data about these accounts.  Default is all.\n"
 	       "-b, --brief\n"
 	       "    Equivalent to \"--fields=jobstep,state,error\". This option\n"
 	       "    has no effect if --dump is specified.\n"
 	       "-c, --completion\n"
 	       "    Use job completion instead of accounting data.\n"
-	       "-C, --cluster\n"
-	       "    Only send data about this cluster -1 for all clusters.\n"
+	       "-C, --clusters\n"
+	       "    Only send data about these clusters.  -1 for all clusters.\n"
 	       "-d, --dump\n"
 	       "    Dump the raw data records\n"
 	       "--duplicates\n"
@@ -444,10 +446,12 @@ void _help_msg(void)
 	       "    followed by \"d\", it is interpreted as number of days. For\n"
 	       "    example, \"--expire=14d\" means that you wish to purge the job\n"
 	       "    accounting log of all jobs that completed more than 14 days ago.\n" 
-	       "-F <field-list>, --fields=<field-list>\n"
-	       "    Display the specified data (use \"--help-fields\" for a\n"
-	       "    list of available fields). If no field option is specified,\n"
-	       "    we use \"--fields=jobstep,jobname,partition,alloc_cpus,state,error\".\n"
+	       "--endtime:                                                   \n"
+               "    Select jobs eligible before this time.                   \n"
+	       "-F <format-list>, --format=<format-list>\n"
+	       "    Display the specified data (use \"--helpformat\" for a\n"
+	       "    list of available fields). If no format option is specified,\n"
+	       "    we use \"--format=jobstep,jobname,partition,alloc_cpus,state,error\".\n"
 	       "-f<file>, --file=<file>\n"
 	       "    Read data from the specified file, rather than SLURM's current\n"
 	       "    accounting log file.\n"
@@ -486,6 +490,8 @@ void _help_msg(void)
 	       "-S, --stat\n"
 	       "    Get real time state of a jobstep supplied by the -j\n"
 	       "    option\n" 
+	       "--starttime:                                                 \n"
+               "    Select jobs eligible after this time.                    \n"
 	       "-t, --total\n"
 	       "    Only show cumulative statistics for each job, not the\n"
 	       "    intermediate steps\n"
@@ -496,7 +502,15 @@ void _help_msg(void)
 	       "    Pointer to this message.\n"
 	       "-v, --verbose\n"
 	       "    Primarily for debugging purposes, report the state of various\n"
-	       "    variables during processing.\n", conf->slurm_conf);
+	       "    variables during processing.\n"
+	       "-W, --wckeys\n"
+	       "    Only send data about these wckeys.  Default is all.\n"
+	       "\n"
+	       "Note, valid start/end time formats are...\n"
+	       "    HH:MM[:SS] [AM|PM]\n"
+	       "    MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]\n"
+	       "    MM/DD[/YY]-HH:MM[:SS]\n"
+	       , conf->slurm_conf);
 
 	slurm_conf_unlock();
 
@@ -624,19 +638,22 @@ void parse_command_line(int argc, char **argv)
 		{"accounts", 1, 0, 'A'},
 		{"begin", 1, 0, 'B'},
 		{"brief", 0, 0, 'b'},
-		{"cluster", 1, 0, 'C'},
+		{"clusters", 1, 0, 'C'},
 		{"completion", 0, &params.opt_completion, 'c'},
 		{"duplicates", 0, &params.opt_dup, 1},
 		{"dump", 0, 0, 'd'},
 		{"end", 1, 0, 'E'},
+		{"endtime", 1, 0, 'E'},
 		{"expire", 1, 0, 'e'},
 		{"fields", 1, 0, 'F'},
+		{"format", 1, 0, 'F'},
 		{"file", 1, 0, 'f'},
 		{"formatted_dump", 0, 0, 'O'},
 		{"gid", 1, 0, 'g'},
 		{"group", 1, 0, 'g'},
 		{"help", 0, &params.opt_help, 1},
 		{"help-fields", 0, &params.opt_help, 2},
+		{"helpformat", 0, &params.opt_help, 2},
 		{"jobs", 1, 0, 'j'},
 		{"long", 0, 0, 'l'},
 		{"big_logfile", 0, &params.opt_lowmem, 1},
@@ -646,15 +663,16 @@ void parse_command_line(int argc, char **argv)
 		{"purge", 0, 0, 'P'},
 		{"state", 1, 0, 's'},
 		{"stat", 0, 0, 'S'},
+		{"starttime", 1, 0, 'B'},
 		{"total", 0, 0,  't'},
 		{"uid", 1, 0, 'u'},
 		{"usage", 0, &params.opt_help, 3},
 		{"user", 1, 0, 'u'},
 		{"verbose", 0, 0, 'v'},
 		{"version", 0, 0, 'V'},
+		{"wckeys", 1, 0, 'W'},
 		{0, 0, 0, 0}};
 
-	job_cond->duplicates = params.opt_dup;
 	params.opt_uid = getuid();
 	params.opt_gid = getgid();
 
@@ -662,7 +680,7 @@ void parse_command_line(int argc, char **argv)
 
 	while (1) {		/* now cycle through the command line */
 		c = getopt_long(argc, argv,
-				"aA:bB:cC:deE:F:f:g:hj:lOP:p:s:StUu:Vv",
+				"aA:bB:cC:deE:F:f:g:hj:lOP:p:s:StUu:VvW:",
 				long_options, &optionIndex);
 		if (c == -1)
 			break;
@@ -811,6 +829,12 @@ void parse_command_line(int argc, char **argv)
 			params.opt_verbose++;
 			break;
 
+		case 'W':
+			if(!job_cond->wckey_list) 
+				job_cond->wckey_list =
+					list_create(slurm_destroy_char);
+			slurm_addto_char_list(job_cond->wckey_list, optarg);
+			break;
 		case 'V':
 			printf("%s %s\n", PACKAGE, SLURM_VERSION);
 			exit(0);
@@ -828,6 +852,8 @@ void parse_command_line(int argc, char **argv)
 	if (params.opt_fdump) 
 		params.opt_dup |= FDUMP_FLAG;
 
+	job_cond->duplicates = params.opt_dup;
+
 	debug("Options selected:\n"
 	      "\topt_archive_jobs=%d\n"
 	      "\topt_archve_steps=%d\n"
@@ -920,6 +946,12 @@ void parse_command_line(int argc, char **argv)
 		}
 	}
 
+	/* if any jobs are specified set to look for all users if none
+	   are set */
+	if((job_cond->step_list && list_count(job_cond->step_list))
+	   && (!job_cond->userid_list || !list_count(job_cond->userid_list)))
+		all_users=1;
+
 	if(all_users) {
 		if(job_cond->userid_list 
 		   && list_count(job_cond->userid_list)) {
@@ -996,6 +1028,15 @@ void parse_command_line(int argc, char **argv)
 		list_iterator_destroy(itr);
 	}
 
+	if (params.opt_verbose && job_cond->wckey_list 
+	    && list_count(job_cond->wckey_list)) {
+		fprintf(stderr, "Wckeys requested:\n");
+		itr = list_iterator_create(job_cond->wckey_list);
+		while((start = list_next(itr))) 
+			fprintf(stderr, "\t: %s\n", start);
+		list_iterator_destroy(itr);
+	} 
+
 	/* select the output fields */
 	if(brief_output) {
 		if(params.opt_completion)
@@ -1318,6 +1359,9 @@ void do_list(void)
 	jobacct_job_rec_t *job = NULL;
 	jobacct_step_rec_t *step = NULL;
 	
+	if(!jobs)
+		return;
+
 	if (params.opt_total)
 		do_jobsteps = 0;
 	itr = list_iterator_create(jobs);
@@ -1364,7 +1408,10 @@ void do_list_completion(void)
 {
 	ListIterator itr = NULL;
 	jobcomp_job_rec_t *job = NULL;
-	
+
+	if(!jobs)
+		return;
+
 	itr = list_iterator_create(jobs);
 	while((job = list_next(itr))) {
 		print_fields(JOBCOMP, job);
diff --git a/src/sacct/print.c b/src/sacct/print.c
index c7f6bf4c7..700701329 100644
--- a/src/sacct/print.c
+++ b/src/sacct/print.c
@@ -39,6 +39,7 @@
 
 #include "sacct.h"
 #include "src/common/parse_time.h"
+#include "src/common/hostlist.h"
 #include "slurm.h"
 #define FORMAT_STRING_SIZE 34
 
@@ -69,16 +70,20 @@ void _elapsed_time(long secs, long usecs, char *str)
 
 	if (days) 
 		snprintf(str, FORMAT_STRING_SIZE,
-			 "%ld-%2.2ld:%2.2ld:%2.2ld",
+			 "%2.2ld-%2.2ld:%2.2ld:%2.2ld",
 		         days, hours, minutes, seconds);
 	else if (hours)
 		snprintf(str, FORMAT_STRING_SIZE,
-			 "%ld:%2.2ld:%2.2ld",
+			 "%2.2ld:%2.2ld:%2.2ld",
 		         hours, minutes, seconds);
-	else
+	else if(subsec)
 		snprintf(str, FORMAT_STRING_SIZE,
-			 "%ld:%2.2ld.%3.3ld",
+			 "%2.2ld:%2.2ld.%3.3ld",
 		         minutes, seconds, subsec);
+	else
+		snprintf(str, FORMAT_STRING_SIZE,
+			 "00:%2.2ld:%2.2ld",
+		         minutes, seconds);
 }
 
 void print_fields(type_t type, void *object)
@@ -406,25 +411,50 @@ void print_nodes(type_t type, void *object)
 
 void print_nnodes(type_t type, void *object)
 { 
+	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
 	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	char temp[FORMAT_STRING_SIZE];
+	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
+	char *tmp_char = NULL;
+	int tmp_int = NO_VAL;
+	hostlist_t hl = NULL;
 
 	switch(type) {
 	case HEADLINE:
 		printf("%-8s", "Node Cnt");
+		tmp_int = INFINITE;
 		break;
 	case UNDERSCORE:
 		printf("%-8s", "--------");
+		tmp_int = INFINITE;
+		break;
+	case JOB:
+		tmp_char = job->nodes;
+		break;
+	case JOBSTEP:
+		tmp_char = step->nodes;
 		break;
 	case JOBCOMP:
-		convert_num_unit((float)jobcomp->node_cnt, temp, 
-				 sizeof(temp), UNIT_NONE);
-		printf("%-8s", temp);
+		tmp_int = jobcomp->node_cnt;
 		break;
 	default:
-		printf("%-8s", "n/a");
 		break;
-	} 
+	}
+	if(tmp_char) {
+		hl = hostlist_create(tmp_char);
+		tmp_int = hostlist_count(hl);
+		hostlist_destroy(hl);
+	}
+
+	if(tmp_int == INFINITE)
+		return;
+	else if(tmp_int == NO_VAL) 
+		printf("%-8s", "n/a");
+	else {
+		char outbuf[FORMAT_STRING_SIZE];
+		convert_num_unit((float)tmp_int, 
+				 outbuf, sizeof(outbuf), UNIT_NONE);
+		printf("%-8s", outbuf);
+	}
 }
 
 void print_ntasks(type_t type, void *object)
@@ -724,6 +754,37 @@ void print_submit(type_t type, void *object)
 	} 
 }
 
+void print_eligible(type_t type, void *object)
+{ 
+	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
+	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
+	char time_str[32];
+		
+	switch(type) {
+	case HEADLINE:
+		printf("%-14s", "Eligible Time");
+		break;
+	case UNDERSCORE:
+		printf("%-14.14s", "--------------");
+		break;
+	case JOB:
+		slurm_make_time_str(&job->eligible, 
+				    time_str, 
+				    sizeof(time_str));
+		printf("%-14s", time_str);
+		break;
+	case JOBSTEP:
+		slurm_make_time_str(&step->start, 
+				    time_str, 
+				    sizeof(time_str));
+		printf("%-14s", time_str);
+		break;
+	default:
+		printf("%-14s", "n/a");
+		break;
+	} 
+}
+
 void print_start(type_t type, void *object)
 { 
 	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c
index e4c2cb82e..5f8a56062 100644
--- a/src/sacct/sacct.c
+++ b/src/sacct/sacct.c
@@ -158,6 +158,7 @@ fields_t fields[] = {{"account", print_account},
 		     {"cpu", print_cpu},
 		     {"cputime", print_cputime}, 
 		     {"elapsed", print_elapsed},
+		     {"eligible", print_eligible},
 		     {"end", print_end}, 
 		     {"exitcode", print_exitcode},
 		     {"finished", print_end},		/* Defunct name */ 
diff --git a/src/sacct/sacct.h b/src/sacct/sacct.h
index 0be55d550..27db34813 100644
--- a/src/sacct/sacct.h
+++ b/src/sacct/sacct.h
@@ -151,6 +151,7 @@ void print_pages(type_t type, void *object);
 void print_rss(type_t type, void *object);
 void print_state(type_t type, void *object);
 void print_submit(type_t type, void *object);
+void print_eligible(type_t type, void *object);
 void print_start(type_t type, void *object);
 void print_end(type_t type, void *object);
 void print_systemcpu(type_t type, void *object);
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index 97e95e11f..40fc5f711 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -101,7 +101,9 @@ static int _set_cond(int *start, int argc, char *argv[],
 			  || !strncasecmp (argv[i], "Names", 
 					   MAX(command_len, 1))
 			  || !strncasecmp (argv[i], "Accounts",
-					   MAX(command_len, 1))) {
+					   MAX(command_len, 1))
+			  || !strncasecmp (argv[i], "Acct",
+					   MAX(command_len, 4))) {
 			if(!assoc_cond->acct_list) {
 				assoc_cond->acct_list = 
 					list_create(slurm_destroy_char);
@@ -134,6 +136,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(format_list)
 				slurm_addto_char_list(format_list, argv[i]+end);
 		} else if (!strncasecmp (argv[i], "FairShare", 
+					 MAX(command_len, 1))
+			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
 			if(!assoc_cond->fairshare_list)
 				assoc_cond->fairshare_list =
@@ -339,7 +343,9 @@ static int _set_rec(int *start, int argc, char *argv[],
 			  || !strncasecmp (argv[i], "Account",
 					   MAX(command_len, 1))
 			  || !strncasecmp (argv[i], "Names", 
-					   MAX(command_len, 1))) {
+					   MAX(command_len, 1))
+			  || !strncasecmp (argv[i], "Acct",
+					    MAX(command_len, 4))) {
 			if(acct_list) 
 				slurm_addto_char_list(acct_list, argv[i]+end);
 				
@@ -353,6 +359,8 @@ static int _set_rec(int *start, int argc, char *argv[],
 			acct->description =  strip_quotes(argv[i]+end, NULL, 1);
 			u_set = 1;
 		} else if (!strncasecmp (argv[i], "FairShare", 
+					 MAX(command_len, 1))
+			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
 			if(!assoc)
 				continue;
@@ -1123,6 +1131,12 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 			field->name = xstrdup("Par Name");
 			field->len = 10;
 			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Shares", object,
+				       MAX(command_len, 1))) {
+			field->type = PRINT_FAIRSHARE;
+			field->name = xstrdup("Shares");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("User", object, MAX(command_len, 1))) {
 			field->type = PRINT_USER;
 			field->name = xstrdup("User");
diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c
index 947426c57..9f6c0ba08 100644
--- a/src/sacctmgr/association_functions.c
+++ b/src/sacctmgr/association_functions.c
@@ -108,7 +108,9 @@ static int _set_cond(int *start, int argc, char *argv[],
 			list_iterator_destroy(itr);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Accounts",
-					 MAX(command_len, 2))) {
+					 MAX(command_len, 2))
+			   || !strncasecmp (argv[i], "Acct",
+					    MAX(command_len, 4))) {
 			if(!assoc_cond->acct_list)
 				assoc_cond->acct_list = 
 					list_create(slurm_destroy_char);
@@ -129,6 +131,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 				slurm_addto_char_list(format_list,
 						      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "FairShare",
+					 MAX(command_len, 1))
+			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
 			if(!assoc_cond->fairshare_list)
 				assoc_cond->fairshare_list =
@@ -373,7 +377,8 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 	
 		field = xmalloc(sizeof(print_field_t));
 
-		if(!strncasecmp("Account", object, MAX(command_len, 1))) {
+		if(!strncasecmp("Account", object, MAX(command_len, 1))
+		   || !strncasecmp("Acct", object, MAX(command_len, 4))) {
 			field->type = PRINT_ACCOUNT;
 			field->name = xstrdup("Account");
 			if(tree_display)
@@ -522,6 +527,12 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			field->name = xstrdup("RGT");
 			field->len = 6;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("Shares", object,
+				       MAX(command_len, 1))) {
+			field->type = PRINT_FAIRSHARE;
+			field->name = xstrdup("Shares");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("User", object, MAX(command_len, 1))) {
 			field->type = PRINT_USER;
 			field->name = xstrdup("User");
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index 38d4bac66..23a9d5a54 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -130,6 +130,8 @@ static int _set_rec(int *start, int argc, char *argv[],
 			if(name_list)
 				slurm_addto_char_list(name_list, argv[i]+end);
 		} else if (!strncasecmp (argv[i], "FairShare", 
+					 MAX(command_len, 1))
+			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
 			if (get_uint(argv[i]+end, &assoc->fairshare, 
 			    "FairShare") == SLURM_SUCCESS)
@@ -211,7 +213,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 					" Bad MaxWall time format: %s\n", 
 					argv[i]);
 			}
-		} else if (!strncasecmp (argv[i], "QosLevel", 
+		} else if (!strncasecmp (argv[i], "QOSLevel", 
 					 MAX(command_len, 1))) {
 			if(!assoc->qos_list) 
 				assoc->qos_list = 
@@ -567,6 +569,12 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 			field->name = xstrdup("RPC");
 			field->len = 3;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("Shares", object, 
+				       MAX(command_len, 1))) {
+			field->type = PRINT_FAIRSHARE;
+			field->name = xstrdup("Shares");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else {
 			exit_code=1;
 			fprintf(stderr, "Unknown field '%s'\n", object);
diff --git a/src/sacctmgr/file_functions.c b/src/sacctmgr/file_functions.c
index 9fcfb033b..61848f224 100644
--- a/src/sacctmgr/file_functions.c
+++ b/src/sacctmgr/file_functions.c
@@ -336,6 +336,8 @@ static sacctmgr_file_opts_t *_parse_options(char *options)
 					 MAX(command_len, 3))) {
 			file_opts->desc = xstrdup(option);
 		} else if (!strncasecmp (sub, "FairShare", 
+					 MAX(command_len, 1))
+			   || !strncasecmp (sub, "Shares",
 					 MAX(command_len, 1))) {
 			if (get_uint(option, &file_opts->fairshare, 
 			    "FairShare") != SLURM_SUCCESS) {
@@ -584,7 +586,9 @@ static List _set_up_print_fields(List format_list)
 			field->len = 20;
 			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("FairShare", object, 
-				       MAX(command_len, 1))) {
+				       MAX(command_len, 1))
+			  || !strncasecmp("Shares", object, 
+					  MAX(command_len, 1))) {
 			field->type = PRINT_FAIRSHARE;
 			field->name = xstrdup("FairShare");
 			field->len = 9;
@@ -1119,7 +1123,7 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 
 		user->wckey_list = list_create(destroy_acct_wckey_rec);
 		wckey_itr = list_iterator_create(file_opts->wckey_list);
-		printf(" Adding WCKey(s) '");
+		printf(" Adding WCKey(s) ");
 		while((temp_char = list_next(wckey_itr))) {
 			wckey = xmalloc(sizeof(acct_wckey_rec_t));
 			wckey->name = xstrdup(temp_char);
@@ -1128,13 +1132,13 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 			list_push(user->wckey_list, wckey);
 
 			if(first) {
-				printf(" %s", temp_char);
+				printf("'%s'", temp_char);
 				first = 0;
 			} else
-				printf(", %s", temp_char);
+				printf(", '%s'", temp_char);
 		}
 		list_iterator_destroy(wckey_itr);
-		printf("' for user '%s'\n", user->name);
+		printf(" for user '%s'\n", user->name);
 		set = 1;
 		notice_thread_init();
 		rc = acct_storage_g_add_wckeys(db_conn, my_uid, 
@@ -1587,7 +1591,7 @@ static acct_account_rec_t *_set_acct_up(sacctmgr_file_opts_t *file_opts,
 		acct->organization = xstrdup(parent);
 	else
 		acct->organization = xstrdup(file_opts->name);
-	/* info("adding acct %s (%s) (%s)", */
+	/* info("adding account %s (%s) (%s)", */
 /* 	        acct->name, acct->description, */
 /* 		acct->organization); */
 
@@ -1686,7 +1690,9 @@ static int _print_file_acct_hierarchical_rec_childern(FILE *fd,
 			if(user_rec) {
 				xstrfmtcat(line, ":DefaultAccount='%s'",
 					   user_rec->default_acct);
-				if(track_wckey)
+				if(track_wckey 
+				   && user_rec->default_wckey 
+				   && user_rec->default_wckey[0])
 					xstrfmtcat(line, ":DefaultWCKey='%s'",
 						   user_rec->default_wckey);
 					
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index 3deadcb19..45187b98f 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -489,7 +489,8 @@ static void _add_it (int argc, char *argv[])
 	acct_storage_g_commit(db_conn, 0);
 	
 	/* First identify the entity to add */
-	if (strncasecmp (argv[0], "Account", MAX(command_len, 1)) == 0) {
+	if (strncasecmp (argv[0], "Account", MAX(command_len, 1)) == 0
+	    || !strncasecmp (argv[0], "Acct", MAX(command_len, 4))) {
 		error_code = sacctmgr_add_account((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "Cluster", MAX(command_len, 2)) == 0) {
 		error_code = sacctmgr_add_cluster((argc - 1), &argv[1]);
@@ -576,7 +577,8 @@ static void _show_it (int argc, char *argv[])
 	acct_storage_g_commit(db_conn, 0);
 
 	/* First identify the entity to list */
-	if (strncasecmp (argv[0], "Accounts", MAX(command_len, 2)) == 0) {
+	if (strncasecmp (argv[0], "Accounts", MAX(command_len, 2)) == 0
+	    || !strncasecmp (argv[0], "Acct", MAX(command_len, 4))) {
 		error_code = sacctmgr_list_account((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "Associations",
 				MAX(command_len, 2)) == 0) {
@@ -633,7 +635,8 @@ static void _modify_it (int argc, char *argv[])
 	acct_storage_g_commit(db_conn, 0);
 
 	/* First identify the entity to modify */
-	if (strncasecmp (argv[0], "Accounts", MAX(command_len, 1)) == 0) {
+	if (strncasecmp (argv[0], "Accounts", MAX(command_len, 1)) == 0
+	    || !strncasecmp (argv[0], "Acct", MAX(command_len, 4))) {
 		error_code = sacctmgr_modify_account((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "Clusters", 
 				MAX(command_len, 1)) == 0) {
@@ -677,7 +680,8 @@ static void _delete_it (int argc, char *argv[])
 	acct_storage_g_commit(db_conn, 0);
 
 	/* First identify the entity to delete */
-	if (strncasecmp (argv[0], "Accounts", MAX(command_len, 1)) == 0) {
+	if (strncasecmp (argv[0], "Accounts", MAX(command_len, 1)) == 0
+	    || !strncasecmp (argv[0], "Acct", MAX(command_len, 4))) {
 		error_code = sacctmgr_delete_account((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "Clusters",
 				MAX(command_len, 2)) == 0) {
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 50cf5654f..3b3835180 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -112,7 +112,9 @@ static int _set_cond(int *start, int argc, char *argv[],
 						 argv[i]+end)) 
 				u_set = 1;
 		} else if (!strncasecmp (argv[i], "Account",
-					 MAX(command_len, 2))) {
+					 MAX(command_len, 2))
+			   || !strncasecmp (argv[i], "Acct",
+					    MAX(command_len, 4))) {
 			if(!assoc_cond->acct_list) {
 				assoc_cond->acct_list = 
 					list_create(slurm_destroy_char);
@@ -157,6 +159,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(format_list)
 				slurm_addto_char_list(format_list, argv[i]+end);
 		} else if (!strncasecmp (argv[i], "FairShare", 
+					 MAX(command_len, 1))
+			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
 			if(!assoc_cond->fairshare_list)
 				assoc_cond->fairshare_list =
@@ -370,6 +374,8 @@ static int _set_rec(int *start, int argc, char *argv[],
 				strip_quotes(argv[i]+end, NULL, 1);
 			u_set = 1;
 		} else if (!strncasecmp (argv[i], "FairShare",
+					 MAX(command_len, 1))
+			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
 			if(!assoc)
 				continue;
@@ -518,7 +524,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 
 /*
  * IN: user_cond - used for the assoc_cond pointing to the user and
- *     acct list 
+ *     account list 
  * IN: check - whether or not to check if the existance of the above lists
  */
 static int _check_coord_request(acct_user_cond_t *user_cond, bool check)
@@ -704,7 +710,9 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 			slurm_addto_char_list(assoc_cond->user_list,
 					      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Accounts", 
-					 MAX(command_len, 2))) {
+					 MAX(command_len, 2))
+			   || !strncasecmp (argv[i], "Acct",
+					    MAX(command_len, 4))) {
 			slurm_addto_char_list(assoc_cond->acct_list,
 					argv[i]+end);
 		} else if (!strncasecmp (argv[i], "AdminLevel", 
@@ -739,6 +747,8 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 			slurm_addto_char_list(wckey_cond->name_list,
 					      default_wckey);
 		} else if (!strncasecmp (argv[i], "FairShare",
+					 MAX(command_len, 1))
+			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
 			if (get_uint(argv[i]+end, &start_assoc.fairshare, 
 			    "FairShare") == SLURM_SUCCESS)
@@ -967,7 +977,7 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 			destroy_acct_wckey_cond(wckey_cond);
 			destroy_acct_association_cond(assoc_cond);
 			exit_code=1;
-			fprintf(stderr, " Need name of acct to "
+			fprintf(stderr, " Need name of account to "
 				"add user to.\n"); 
 			return SLURM_ERROR;
 		}
@@ -1538,7 +1548,8 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 		command_len = strlen(object);
 
 		field = xmalloc(sizeof(print_field_t));
-		if(!strncasecmp("Account", object, MAX(command_len, 2))) {
+		if(!strncasecmp("Account", object, MAX(command_len, 2))
+		   || !strncasecmp ("Acct", object, MAX(command_len, 4))) {
 			field->type = PRINT_ACCOUNT;
 			field->name = xstrdup("Account");
 			field->len = 10;
@@ -1680,6 +1691,12 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 			field->name = xstrdup("Partition");
 			field->len = 10;
 			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Shares", object,
+				       MAX(command_len, 1))) {
+			field->type = PRINT_FAIRSHARE;
+			field->name = xstrdup("Shares");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("User", object, MAX(command_len, 1))
 			  || !strncasecmp("Name", object, 
 					  MAX(command_len, 2))) {
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index 9aaf5dd2d..14ea9975b 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -546,8 +546,10 @@ void set_options(const int argc, char **argv)
 		{"reboot",	  no_argument,       0, LONG_OPT_REBOOT},
 		{"blrts-image",   required_argument, 0, LONG_OPT_BLRTS_IMAGE},
 		{"linux-image",   required_argument, 0, LONG_OPT_LINUX_IMAGE},
+		{"cnload-image",  required_argument, 0, LONG_OPT_LINUX_IMAGE},
 		{"mloader-image", required_argument, 0, LONG_OPT_MLOADER_IMAGE},
 		{"ramdisk-image", required_argument, 0, LONG_OPT_RAMDISK_IMAGE},
+		{"ioload-image",  required_argument, 0, LONG_OPT_RAMDISK_IMAGE},
 		{"acctg-freq",    required_argument, 0, LONG_OPT_ACCTG_FREQ},
 		{"no-shell",      no_argument,       0, LONG_OPT_NOSHELL},
 		{"get-user-env",  optional_argument, 0, LONG_OPT_GET_USER_ENV},
@@ -1352,14 +1354,25 @@ static void _opt_list()
 	xfree(str);
 	info("reboot         : %s", opt.reboot ? "no" : "yes");
 	info("rotate         : %s", opt.no_rotate ? "yes" : "no");
+#ifdef HAVE_BGL
 	if (opt.blrtsimage)
 		info("BlrtsImage     : %s", opt.blrtsimage);
+#endif
 	if (opt.linuximage)
+#ifdef HAVE_BGL
 		info("LinuxImage     : %s", opt.linuximage);
+#else
+		info("CnloadImage    : %s", opt.linuximage);
+#endif
 	if (opt.mloaderimage)
 		info("MloaderImage   : %s", opt.mloaderimage);
 	if (opt.ramdiskimage)
+#ifdef HAVE_BGL
 		info("RamDiskImage   : %s", opt.ramdiskimage);
+#else
+		info("IoloadImage   : %s", opt.ramdiskimage);
+#endif
+
 	if (opt.begin) {
 		char time_str[32];
 		slurm_make_time_str(&opt.begin, time_str, sizeof(time_str));
@@ -1396,8 +1409,13 @@ static void _usage(void)
 "              [--account=name] [--dependency=type:jobid] [--comment=name]\n"
 #ifdef HAVE_BG		/* Blue gene specific options */
 "              [--geometry=XxYxZ] [--conn-type=type] [--no-rotate] [ --reboot]\n"
+#ifdef HAVE_BGL
 "              [--blrts-image=path] [--linux-image=path]\n"
 "              [--mloader-image=path] [--ramdisk-image=path]\n"
+#else
+"              [--cnload-image=path]\n"
+"              [--mloader-image=path] [--ioload-image=path]\n"
+#endif
 #endif
 "              [--mail-type=type] [--mail-user=user][--nice[=value]]\n"
 "              [--bell] [--no-bell] [--kill-command[=signal]]\n"
@@ -1490,24 +1508,34 @@ static void _help(void)
 
         printf("\n"
 #ifdef HAVE_AIX				/* AIX/Federation specific options */
-  "AIX related options:\n"
-  "  --network=type              communication protocol to be used\n"
-  "\n"
+"AIX related options:\n"
+"  --network=type              communication protocol to be used\n"
+"\n"
 #endif
 #ifdef HAVE_BG				/* Blue gene specific options */
 "\n"
-  "Blue Gene related options:\n"
-  "  -g, --geometry=XxYxZ        geometry constraints of the job\n"
-  "  -R, --no-rotate             disable geometry rotation\n"
-  "      --reboot                reboot nodes before starting job\n"
-  "      --conn-type=type        constraint on type of connection, MESH or TORUS\n"
-  "                              if not set, then tries to fit TORUS else MESH\n"
-  "      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
-  "      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
-  "      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
-  "      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
-  "\n"
+"Blue Gene related options:\n"
+"  -g, --geometry=XxYxZ        geometry constraints of the job\n"
+"  -R, --no-rotate             disable geometry rotation\n"
+"      --reboot                reboot nodes before starting job\n"
+"      --conn-type=type        constraint on type of connection, MESH or TORUS\n"
+"                              if not set, then tries to fit TORUS else MESH\n"
+#ifndef HAVE_BGL
+"                              If wanting to run in HTC mode (only for 1\n"
+"                              midplane and below).  You can use HTC_S for\n"
+"                              SMP, HTC_D for Dual, HTC_V for\n"
+"                              virtual node mode, and HTC_L for Linux mode.\n" 
+"      --cnload-image=path     path to compute node image for bluegene block.  Default if not set\n"
+"      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
+"      --ioload-image=path     path to ioload image for bluegene block.  Default if not set\n"
+#else
+"      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
+"      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
+"      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
+"      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
+#endif
 #endif
+"\n"
 "Help options:\n"
 "  -h, --help                  show this help message\n"
 "  -u, --usage                 display brief usage message\n"
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index 16ad770c2..4eb596af4 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -209,7 +209,8 @@ int main(int argc, char *argv[])
 	info("Granted job allocation %d", alloc->job_id);
 #ifdef HAVE_BG
 	if (!_wait_bluegene_block_ready(alloc)) {
-		error("Something is wrong with the boot of the block.");
+		if(!allocation_interrupted)
+			error("Something is wrong with the boot of the block.");
 		goto relinquish;
 	}
 
@@ -614,6 +615,7 @@ static int _wait_bluegene_block_ready(resource_allocation_response_msg_t *alloc)
 	int max_delay = BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
 		(BG_INCR_BLOCK_BOOT * alloc->node_cnt);
 
+	pending_job_id = alloc->job_id;
 	select_g_get_jobinfo(alloc->select_jobinfo, SELECT_DATA_BLOCK_ID,
 			     &block_id);
 
@@ -642,12 +644,16 @@ static int _wait_bluegene_block_ready(resource_allocation_response_msg_t *alloc)
 			break;
 		}
 	}
-
 	if (is_ready)
      		info("Block %s is ready for job", block_id);
-	else
+	else if(!allocation_interrupted)
 		error("Block %s still not ready", block_id);
+	else /* this should never happen, but if allocation_intrrupted
+		send back not ready */
+		is_ready = 0;
+
 	xfree(block_id);
+	pending_job_id = 0;
 
 	return is_ready;
 }
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index b4130e524..207beef37 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -331,6 +331,7 @@ env_vars_t env_vars[] = {
   {"SBATCH_JOBID",         OPT_INT,        &opt.jobid,         NULL           },
   {"SBATCH_JOB_NAME",      OPT_STRING,     &opt.job_name,      NULL           },
   {"SBATCH_LINUX_IMAGE",   OPT_STRING,     &opt.linuximage,    NULL           },
+  {"SBATCH_CNLOAD_IMAGE",  OPT_STRING,     &opt.linuximage,    NULL           },
   {"SBATCH_MLOADER_IMAGE", OPT_STRING,     &opt.mloaderimage,  NULL           },
   {"SBATCH_NO_REQUEUE",    OPT_NO_REQUEUE, NULL,               NULL           },
   {"SBATCH_REQUEUE",       OPT_REQUEUE,    NULL,               NULL           },
@@ -338,6 +339,7 @@ env_vars_t env_vars[] = {
   {"SBATCH_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL           },
   {"SBATCH_PARTITION",     OPT_STRING,     &opt.partition,     NULL           },
   {"SBATCH_RAMDISK_IMAGE", OPT_STRING,     &opt.ramdiskimage,  NULL           },
+  {"SBATCH_IOLOAD_IMAGE",  OPT_STRING,     &opt.ramdiskimage,  NULL           },
   {"SBATCH_TIMELIMIT",     OPT_STRING,     &opt.time_limit_str,NULL           },
   {"SBATCH_EXCLUSIVE",     OPT_EXCLUSIVE,  NULL,               NULL           },
   {"SBATCH_OPEN_MODE",     OPT_OPEN_MODE,  NULL,               NULL           },
@@ -553,8 +555,10 @@ static struct option long_options[] = {
 	{"ntasks-per-core",  required_argument, 0, LONG_OPT_NTASKSPERCORE},
 	{"blrts-image",   required_argument, 0, LONG_OPT_BLRTS_IMAGE},
 	{"linux-image",   required_argument, 0, LONG_OPT_LINUX_IMAGE},
+	{"cnload-image",  required_argument, 0, LONG_OPT_LINUX_IMAGE},
 	{"mloader-image", required_argument, 0, LONG_OPT_MLOADER_IMAGE},
 	{"ramdisk-image", required_argument, 0, LONG_OPT_RAMDISK_IMAGE},
+	{"ioload-image",  required_argument, 0, LONG_OPT_RAMDISK_IMAGE},
 	{"reboot",        no_argument,       0, LONG_OPT_REBOOT},
 	{"tasks-per-node",required_argument, 0, LONG_OPT_NTASKSPERNODE},
 	{"wrap",          required_argument, 0, LONG_OPT_WRAP},
@@ -2095,15 +2099,24 @@ static void _opt_list()
 	info("rotate         : %s", opt.no_rotate ? "yes" : "no");
 	info("network        : %s", opt.network);
 
+#ifdef HAVE_BGL
 	if (opt.blrtsimage)
 		info("BlrtsImage     : %s", opt.blrtsimage);
+#endif
 	if (opt.linuximage)
+#ifdef HAVE_BGL
 		info("LinuxImage     : %s", opt.linuximage);
+#else
+		info("CnloadImage    : %s", opt.linuximage);
+#endif
 	if (opt.mloaderimage)
 		info("MloaderImage   : %s", opt.mloaderimage);
 	if (opt.ramdiskimage)
+#ifdef HAVE_BGL
 		info("RamDiskImage   : %s", opt.ramdiskimage);
-
+#else
+		info("IoloadImage   : %s", opt.ramdiskimage);
+#endif
 	if (opt.begin) {
 		char time_str[32];
 		slurm_make_time_str(&opt.begin, time_str, sizeof(time_str));
@@ -2143,8 +2156,13 @@ static void _usage(void)
 "              [--account=name] [--dependency=type:jobid] [--comment=name]\n"
 #ifdef HAVE_BG		/* Blue gene specific options */
 "              [--geometry=XxYxZ] [--conn-type=type] [--no-rotate] [ --reboot]\n"
+#ifdef HAVE_BGL
 "              [--blrts-image=path] [--linux-image=path]\n"
 "              [--mloader-image=path] [--ramdisk-image=path]\n"
+#else
+"              [--cnload-image=path]\n"
+"              [--mloader-image=path] [--ioload-image=path]\n"
+#endif
 #endif
 "              [--mail-type=type] [--mail-user=user][--nice[=value]]\n"
 "              [--requeue] [--no-requeue] [--ntasks-per-node=n] [--propagate]\n"
@@ -2249,11 +2267,21 @@ static void _help(void)
 "      --reboot                reboot block before starting job\n"
 "      --conn-type=type        constraint on type of connection, MESH or TORUS\n"
 "                              if not set, then tries to fit TORUS else MESH\n"
+#ifndef HAVE_BGL
+"                              If wanting to run in HTC mode (only for 1\n"
+"                              midplane and below).  You can use HTC_S for\n"
+"                              SMP, HTC_D for Dual, HTC_V for\n"
+"                              virtual node mode, and HTC_L for Linux mode.\n" 
+"      --cnload-image=path     path to compute node image for bluegene block.  Default if not set\n"
+"      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
+"      --ioload-image=path     path to ioload image for bluegene block.  Default if not set\n"
+#else
 "      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
 "      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
 "      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
 "      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
 #endif
+#endif
 "\n"
 "Help options:\n"
 "  -h, --help                  show this help message\n"
diff --git a/src/sinfo/opts.c b/src/sinfo/opts.c
index 58cb1aba6..c46efbf26 100644
--- a/src/sinfo/opts.c
+++ b/src/sinfo/opts.c
@@ -246,7 +246,7 @@ extern void parse_command_line(int argc, char *argv[])
 		} else {
 			params.format = params.long_output ? 
 			  "%9P %.5a %.10l %.10s %.4r %.5h %.10g %.6D %.11T %N" :
-			  "%9P %.5a %.10l %.5D %.6t %N";
+			  "%9P %.5a %.10l %.6D %.6t %N";
 		}
 	}
 	_parse_format( params.format );
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index 51cc1b7cf..5471ca96d 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -101,7 +101,7 @@ extern void acct_policy_add_job_submit(struct job_record *job_ptr)
 {
 	acct_association_rec_t *assoc_ptr = NULL;
 
-	if (accounting_enforce != ACCOUNTING_ENFORCE_WITH_LIMITS
+	if (!(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)
 	    || !_valid_job_assoc(job_ptr))
 		return;
 
@@ -125,7 +125,7 @@ extern void acct_policy_remove_job_submit(struct job_record *job_ptr)
 	acct_association_rec_t *assoc_ptr = NULL;
 
 	if (!job_ptr->assoc_ptr || 
-	    accounting_enforce != ACCOUNTING_ENFORCE_WITH_LIMITS)
+	    !(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS))
 		return;
 
 	slurm_mutex_lock(&assoc_mgr_association_lock);
@@ -150,7 +150,7 @@ extern void acct_policy_job_begin(struct job_record *job_ptr)
 {
 	acct_association_rec_t *assoc_ptr = NULL;
 
-	if (accounting_enforce != ACCOUNTING_ENFORCE_WITH_LIMITS
+	if (!(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)
 	    || !_valid_job_assoc(job_ptr))
 		return;
 
@@ -174,7 +174,7 @@ extern void acct_policy_job_fini(struct job_record *job_ptr)
 {
 	acct_association_rec_t *assoc_ptr = NULL;
 
-	if (accounting_enforce != ACCOUNTING_ENFORCE_WITH_LIMITS
+	if (!(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)
 	    || !job_ptr->assoc_ptr)
 		return;
 
@@ -231,7 +231,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 	}
 
 	/* now see if we are enforcing limits */
-	if (accounting_enforce != ACCOUNTING_ENFORCE_WITH_LIMITS)
+	if (!(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS))
 		return true;
 
 	/* clear old state reason */
@@ -301,7 +301,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 		/* we don't need to check submit_jobs here */
 		
 		/* FIX ME: Once we start tracking time of running jobs
-		 * we will need toupdate the amount of time we have
+		 * we will need to update the amount of time we have
 		 * used and check against that here.  When we start
 		 * keeping track of time we will also need to come up
 		 * with a way to refresh the time. 
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 11ce5f6f7..a3bf1b894 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -201,7 +201,8 @@ int main(int argc, char *argv[])
 	slurmctld_lock_t config_write_lock = {
 		WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK };
 	assoc_init_args_t assoc_init_arg;
-	pthread_t assoc_cache_thread;
+	pthread_t assoc_cache_thread = 0;
+	gid_t slurm_user_gid;
 
 	/*
 	 * Establish initial configuration
@@ -224,19 +225,43 @@ int main(int argc, char *argv[])
 	 */
 	_init_pidfile();
 
-	/* Initialize supplementary group ID list for SlurmUser */
-	if ((getuid() == 0)
-	&&  (slurmctld_conf.slurm_user_id != getuid())
-	&&  initgroups(slurmctld_conf.slurm_user_name,
-			gid_from_string(slurmctld_conf.slurm_user_name))) {
-		error("initgroups: %m");
+	/* Determine SlurmUser gid */
+	slurm_user_gid = gid_from_uid(slurmctld_conf.slurm_user_id);
+	if (slurm_user_gid == (gid_t) -1) {
+		fatal("Failed to determine gid of SlurmUser(%d)", 
+		      slurm_user_gid);
 	}
 
-	if ((slurmctld_conf.slurm_user_id != getuid())
-	&&  (setuid(slurmctld_conf.slurm_user_id))) {
+	/* Initialize supplementary groups ID list for SlurmUser */
+	if (getuid() == 0) {
+		/* root does not need supplementary groups */
+		if ((slurmctld_conf.slurm_user_id == 0) &&
+		    (setgroups(0, NULL) != 0)) {
+			fatal("Failed to drop supplementary groups, "
+			      "setgroups: %m");
+		} else if ((slurmctld_conf.slurm_user_id != getuid()) &&
+			   initgroups(slurmctld_conf.slurm_user_name, 
+				      slurm_user_gid)) {
+			fatal("Failed to set supplementary groups, "
+			      "initgroups: %m");
+		}
+	} else {
+		info("Not running as root. Can't drop supplementary groups");
+	}
+
+	/* Set GID to GID of SlurmUser */
+	if ((slurm_user_gid != getegid()) &&
+	    (setgid(slurm_user_gid))) {
+		fatal("Failed to set GID to %d", slurm_user_gid);
+	}
+
+	/* Set UID to UID of SlurmUser */
+	if ((slurmctld_conf.slurm_user_id != getuid()) &&
+	    (setuid(slurmctld_conf.slurm_user_id))) {
 		fatal("Can not set uid to SlurmUser(%d): %m", 
-			slurmctld_conf.slurm_user_id);
+		      slurmctld_conf.slurm_user_id);
 	}
+
 	if (stat(slurmctld_conf.mail_prog, &stat_buf) != 0)
 		error("Configured MailProg is invalid");
 
@@ -308,6 +333,21 @@ int main(int argc, char *argv[])
 	association_based_accounting =
 		slurm_get_is_association_based_accounting();
 	accounting_enforce = slurmctld_conf.accounting_storage_enforce;
+
+	if(accounting_enforce && !association_based_accounting) {
+		slurm_ctl_conf_t *conf = slurm_conf_lock();
+		conf->track_wckey = false;
+		conf->accounting_storage_enforce = 0;
+		accounting_enforce = 0;
+		slurmctld_conf.track_wckey = false;
+		slurmctld_conf.accounting_storage_enforce = 0;
+		slurm_conf_unlock();
+
+		error("You can not have AccountingStorageEnforce "
+		      "set for AccountingStorageType='%s'", 
+		      slurmctld_conf.accounting_storage_type);
+	}
+
 	acct_db_conn = acct_storage_g_get_connection(true, 0, false);
 
 	memset(&assoc_init_arg, 0, sizeof(assoc_init_args_t));
@@ -317,7 +357,7 @@ int main(int argc, char *argv[])
 		ASSOC_MGR_CACHE_USER | ASSOC_MGR_CACHE_QOS;
 
 	if (assoc_mgr_init(acct_db_conn, &assoc_init_arg)) {
-		if(accounting_enforce) 
+		if(accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS) 
 			error("Association database appears down, "
 			      "reading from state file.");
 		else
@@ -325,7 +365,8 @@ int main(int argc, char *argv[])
 			      "reading from state file.");
 			
 		if ((load_assoc_mgr_state(slurmctld_conf.state_save_location)
-		     != SLURM_SUCCESS) && accounting_enforce) {
+		     != SLURM_SUCCESS) 
+		    && (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)) {
 			error("Unable to get any information from "
 			      "the state file");
 			fatal("slurmdbd and/or database must be up at "
@@ -405,16 +446,9 @@ int main(int argc, char *argv[])
 			}
 			unlock_slurmctld(config_write_lock);
 			
-			if ((recover == 0) || 
-			    (!stat("/tmp/slurm_accounting_first", &stat_buf))) {
-				/* When first starting to write node state
-				 * information to Gold or SlurmDBD, create 
-				 * a file called "/tmp/slurm_accounting_first"  
-				 * to capture node initialization information */
-				
+			if (recover == 0) 
 				_accounting_mark_all_nodes_down("cold-start");
-				unlink("/tmp/slurm_accounting_first");
-			}
+			
 		} else {
 			error("this host (%s) not valid controller (%s or %s)",
 				node_name, slurmctld_conf.control_machine,
@@ -431,7 +465,8 @@ int main(int argc, char *argv[])
 			   NULL will just use those set before.
 			*/
 			if (assoc_mgr_init(acct_db_conn, NULL) &&
-			    accounting_enforce && !running_cache) {
+			    (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)
+			    && !running_cache) {
 				error("assoc_mgr_init failure");
 				fatal("slurmdbd and/or database must be up at "
 				      "slurmctld start time");
@@ -546,6 +581,8 @@ int main(int argc, char *argv[])
 	if (i >= 10)
 		error("Left %d agent threads active", cnt);
 
+	slurm_sched_fini();
+
 	/* Purge our local data structures */
 	job_fini();
 	part_fini();	/* part_fini() must preceed node_fini() */
@@ -558,7 +595,6 @@ int main(int argc, char *argv[])
 	g_slurm_jobcomp_fini();
 	slurm_acct_storage_fini();
 	slurm_jobacct_gather_fini();
-	slurm_sched_fini();
 	slurm_select_fini();
 	checkpoint_fini();
 	slurm_auth_fini();
@@ -964,8 +1000,7 @@ static int _accounting_cluster_ready()
 		/* see if we are running directly to a database
 		 * instead of a slurmdbd.
 		 */
-		send_jobs_to_accounting(event_time);
-		send_nodes_to_accounting(event_time);
+		send_all_to_accounting(event_time);
 		rc = SLURM_SUCCESS;
 	}
 
@@ -1016,7 +1051,7 @@ static void _remove_assoc(acct_association_rec_t *rec)
 {
 	int cnt = 0;
 
-	if (accounting_enforce)
+	if (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)
 		cnt = job_cancel_by_assoc_id(rec->id);
 
 	if (cnt) {
@@ -1269,6 +1304,18 @@ void save_all_state(void)
 	dump_assoc_mgr_state(slurmctld_conf.state_save_location);
 }
 
+/* send all info for the controller to accounting */
+extern void send_all_to_accounting(time_t event_time)
+{
+	/* ignore the rcs here because if there was an error we will
+	   push the requests on the queue and process them when the
+	   database server comes back up.
+	*/
+	debug2("send_all_to_accounting: called");
+	send_jobs_to_accounting();
+	send_nodes_to_accounting(event_time);
+}
+
 /* 
  * _report_locks_set - report any slurmctld locks left set 
  * RET count of locks currently set
@@ -1423,7 +1470,8 @@ static void _usage(char *prog_name)
 /*
  * Tell the backup_controller to relinquish control, primary control_machine 
  *	has resumed operation
- * wait_time - How long to wait for backup controller to write state, seconds
+ * wait_time - How long to wait for backup controller to write state, seconds.
+ *             Must be zero when called from _slurmctld_background() loop.
  * RET 0 or an error code
  * NOTE: READ lock_slurmctld config before entry (or be single-threaded)
  */
@@ -1452,9 +1500,17 @@ static int _shutdown_backup_controller(int wait_time)
 	}
 	if (rc == ESLURM_DISABLED)
 		debug("backup controller responding");
-	else if (rc == 0)
+	else if (rc == 0) {
 		debug("backup controller has relinquished control");
-	else {
+		if (wait_time == 0) {
+			/* In case primary controller really did not terminate,
+			 * but just temporarily became non-responsive */
+			clusteracct_storage_g_register_ctld(
+				acct_db_conn,
+				slurmctld_cluster_name, 
+				slurmctld_conf.slurmctld_port);
+		}
+	} else {
 		error("_shutdown_backup_controller: %s", slurm_strerror(rc));
 		return SLURM_ERROR;
 	}
@@ -1587,8 +1643,9 @@ static void *_assoc_cache_mgr(void *no_data)
 	ListIterator itr = NULL;
 	struct job_record *job_ptr = NULL;
 	acct_association_rec_t assoc_rec;
+	/* Write lock on jobs, read lock on nodes and partitions */
 	slurmctld_lock_t job_write_lock =
-		{ READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK };
+		{ NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
 
 	while(running_cache == 1) {
 		slurm_mutex_lock(&assoc_cache_mutex);
@@ -1636,5 +1693,8 @@ static void *_assoc_cache_mgr(void *no_data)
 	}
 	list_iterator_destroy(itr);
 	unlock_slurmctld(job_write_lock);
+	/* This needs to be after the lock and after we update the
+	   jobs so if we need to send them we are set. */
+	_accounting_cluster_ready();
 	return NULL;
 }
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index f3847e752..3c3f38eac 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -757,7 +757,8 @@ static int _load_job_state(Buf buffer)
 				    accounting_enforce,
 				    (acct_association_rec_t **)
 				    &job_ptr->assoc_ptr) &&
-	    accounting_enforce && (!IS_JOB_FINISHED(job_ptr))) {
+	    (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)
+	    && (!IS_JOB_FINISHED(job_ptr))) {
 		info("Cancelling job %u with invalid association",
 		     job_id);
 		job_ptr->job_state = JOB_CANCELLED;
@@ -2045,7 +2046,8 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		error_code = ESLURM_INVALID_ACCOUNT;
 		return error_code;
 	} else if(association_based_accounting
-		  && !assoc_ptr && !accounting_enforce) {
+		  && !assoc_ptr 
+		  && !(accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)) {
 		/* if not enforcing associations we want to look for
 		   the default account and use it to avoid getting
 		   trash in the accounting records.
@@ -2063,7 +2065,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	}
 	if (job_desc->account == NULL)
 		job_desc->account = xstrdup(assoc_rec.acct);
-	if ((accounting_enforce == ACCOUNTING_ENFORCE_WITH_LIMITS) &&
+	if ((accounting_enforce & ACCOUNTING_ENFORCE_LIMITS) &&
 	    (!_validate_acct_policy(job_desc, part_ptr, &assoc_rec))) {
 		info("_job_create: exceeded association's node or time limit "
 		     "for user %u", job_desc->user_id);
@@ -2229,7 +2231,8 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 						       part_ptr,
 						       &req_bitmap,
 						       &exc_bitmap))) {
-		error_code = ESLURM_ERROR_ON_DESC_TO_RECORD_COPY;
+		if(error_code == SLURM_ERROR)
+			error_code = ESLURM_ERROR_ON_DESC_TO_RECORD_COPY;
 		goto cleanup_fail;
 	}
 
@@ -2795,6 +2798,52 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	struct job_details *detail_ptr;
 	struct job_record *job_ptr;
 
+        if(slurm_get_track_wckey()) {
+		char *wckey = NULL;
+		if(!job_desc->name || !strchr(job_desc->name, '\"')) {
+			/* get the default wckey for this user since none was
+			 * given */
+			acct_user_rec_t user_rec;
+			memset(&user_rec, 0, sizeof(acct_user_rec_t));
+			user_rec.uid = job_desc->user_id;
+			assoc_mgr_fill_in_user(acct_db_conn, &user_rec,
+					       accounting_enforce);
+			if(user_rec.default_wckey)
+				xstrfmtcat(job_desc->name, "\"*%s",
+					   user_rec.default_wckey);
+			else if(!(accounting_enforce 
+				  & ACCOUNTING_ENFORCE_WCKEYS))
+				xstrcat(job_desc->name, "\"*");	
+			else {
+				error("Job didn't specify wckey and user "
+				      "%d has no default.", job_desc->user_id);
+				return ESLURM_INVALID_WCKEY;
+			}
+		} else if(job_desc->name 
+			  && (wckey = strchr(job_desc->name, '\"'))
+			  && (accounting_enforce & ACCOUNTING_ENFORCE_WCKEYS)) {
+			acct_wckey_rec_t wckey_rec, *wckey_ptr = NULL;
+			wckey++;
+				
+			memset(&wckey_rec, 0, sizeof(acct_wckey_rec_t));
+			wckey_rec.uid       = job_desc->user_id;
+			wckey_rec.name      = wckey;
+
+			if (assoc_mgr_fill_in_wckey(acct_db_conn, &wckey_rec,
+						    accounting_enforce,
+						    &wckey_ptr)) {
+				info("_job_create: invalid wckey '%s' "
+				     "for user %u.",
+				     wckey_rec.name, job_desc->user_id);
+				return ESLURM_INVALID_WCKEY;
+			}
+		} else if (accounting_enforce & ACCOUNTING_ENFORCE_WCKEYS) {
+			/* This should never happen */
+			info("_job_create: no wckey was given.");
+				return ESLURM_INVALID_WCKEY;
+		}
+	}
+
 	job_ptr = create_job_record(&error_code);
 	if (error_code)
 		return error_code;
@@ -2805,26 +2854,11 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 		job_ptr->job_id = job_desc->job_id;
 	else
 		_set_job_id(job_ptr);
-	_add_job_hash(job_ptr);
 
 	if (job_desc->name)
-		job_ptr->name = xstrdup(job_desc->name);
-	
-        if(slurm_get_track_wckey() 
-	   && (!job_ptr->name || !strchr(job_ptr->name, '\"'))) {
-		/* get the default wckey for this user since none was
-		 * given */
-		acct_user_rec_t user_rec;
-		memset(&user_rec, 0, sizeof(acct_user_rec_t));
-		user_rec.uid = job_desc->user_id;
-		assoc_mgr_fill_in_user(acct_db_conn, &user_rec,
-				       accounting_enforce);
-		if(user_rec.default_wckey)
-			xstrfmtcat(job_ptr->name, "\"*%s",
-				   user_rec.default_wckey);
-		else
-			xstrcat(job_ptr->name, "\"*");	
-	}
+		job_ptr->name = xstrdup(job_desc->name);	
+
+	_add_job_hash(job_ptr);
 
 	job_ptr->user_id    = (uid_t) job_desc->user_id;
 	job_ptr->group_id   = (gid_t) job_desc->group_id;
@@ -3925,6 +3959,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	bitstr_t *exc_bitmap = NULL, *req_bitmap = NULL;
 	time_t now = time(NULL);
 	multi_core_data_t *mc_ptr = NULL;
+	bool update_accounting = false;
 
 	job_ptr = find_job_record(job_specs->job_id);
 	if (job_ptr == NULL) {
@@ -3995,13 +4030,53 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		}
 	}
 
+	if (job_specs->comment && wiki_sched && (!super_user)) {
+		/* User must use Moab command to change job comment */
+		error("Attempt to change comment for job %u",
+		      job_specs->job_id);
+		error_code = ESLURM_ACCESS_DENIED;
+#if 0
+		if (wiki_sched && strstr(job_ptr->comment, "QOS:")) {
+			if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR"))
+				job_ptr->qos = QOS_EXPEDITE;
+			else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE"))
+				job_ptr->qos = QOS_STANDBY;
+			else
+				job_ptr->qos = QOS_NORMAL;
+#endif
+	} else if (job_specs->comment) {
+		xfree(job_ptr->comment);
+		job_ptr->comment = job_specs->comment;
+		job_specs->comment = NULL;	/* Nothing left to free */
+		info("update_job: setting comment to %s for job_id %u",
+		     job_ptr->comment, job_specs->job_id);
+
+		if (wiki_sched && strstr(job_ptr->comment, "QOS:")) {
+			if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR"))
+				job_ptr->qos = QOS_EXPEDITE;
+			else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE"))
+				job_ptr->qos = QOS_STANDBY;
+			else
+				job_ptr->qos = QOS_NORMAL;
+		}
+	}
+
+	if (job_specs->requeue != (uint16_t) NO_VAL) {
+		detail_ptr->requeue = job_specs->requeue;
+		info("update_job: setting requeue to %u for job_id %u",
+		     job_specs->requeue, job_specs->job_id);
+	}
+
 	if (job_specs->priority != NO_VAL) {
-		if (super_user
-		    ||  (job_ptr->priority > job_specs->priority)) {
+		if (!IS_JOB_PENDING(job_ptr) || (detail_ptr == NULL)) 
+			error_code = ESLURM_DISABLED;
+		else if (super_user
+			 ||  (job_ptr->priority > job_specs->priority)) {
 			job_ptr->priority = job_specs->priority;
 			info("update_job: setting priority to %u for "
 			     "job_id %u", job_ptr->priority, 
 			     job_specs->job_id);
+			update_accounting = true;
 		} else {
 			error("Attempt to increase priority for job %u",
 			      job_specs->job_id);
@@ -4018,6 +4093,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			info("update_job: setting priority to %u for "
 			     "job_id %u", job_ptr->priority,
 			     job_specs->job_id);
+			update_accounting = true;
 		} else {
 			error("Attempt to increase priority for job %u",
 			      job_specs->job_id);
@@ -4124,6 +4200,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			info("update_job: setting num_procs to %u for "
 			     "job_id %u", job_specs->num_procs, 
 			     job_specs->job_id);
+			update_accounting = true;
 		} else {
 			error("Attempt to increase num_procs for job %u",
 			      job_specs->job_id);
@@ -4140,6 +4217,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			info("update_job: setting min_nodes to %u for "
 			     "job_id %u", job_specs->min_nodes, 
 			     job_specs->job_id);
+			update_accounting = true;
 		} else {
 			error("Attempt to increase min_nodes for job %u",
 			      job_specs->job_id);
@@ -4274,37 +4352,6 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		}
 	}
 
-	if (job_specs->comment && wiki_sched && (!super_user)) {
-		/* User must use Moab command to change job comment */
-		error("Attempt to change comment for job %u",
-		      job_specs->job_id);
-		error_code = ESLURM_ACCESS_DENIED;
-#if 0
-		if (wiki_sched && strstr(job_ptr->comment, "QOS:")) {
-			if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR"))
-				job_ptr->qos = QOS_EXPEDITE;
-			else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE"))
-				job_ptr->qos = QOS_STANDBY;
-			else
-				job_ptr->qos = QOS_NORMAL;
-#endif
-	} else if (job_specs->comment) {
-		xfree(job_ptr->comment);
-		job_ptr->comment = job_specs->comment;
-		job_specs->comment = NULL;	/* Nothing left to free */
-		info("update_job: setting comment to %s for job_id %u",
-		     job_ptr->comment, job_specs->job_id);
-
-		if (wiki_sched && strstr(job_ptr->comment, "QOS:")) {
-			if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR"))
-				job_ptr->qos = QOS_EXPEDITE;
-			else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE"))
-				job_ptr->qos = QOS_STANDBY;
-			else
-				job_ptr->qos = QOS_NORMAL;
-		}
-	}
-
 	if (job_specs->name) {
 		if (!IS_JOB_PENDING(job_ptr))
 			error_code = ESLURM_DISABLED;
@@ -4352,21 +4399,34 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 				xstrfmtcat(job_ptr->name, "%s", jname);
 				xfree(jname);
 			} 
-			
+
 			if(wckey) {
-				xstrfmtcat(job_ptr->name, "\"%s", wckey);
+				int rc = update_job_wckey("update_job",
+							  job_ptr, 
+							  wckey);
+				if (rc != SLURM_SUCCESS)
+					error_code = rc;
 				xfree(wckey);			
 			}
 
 			info("update_job: setting name to %s for job_id %u",
 			     job_ptr->name, job_specs->job_id);
+			update_accounting = true;
 		}
 	}
 
-	if (job_specs->requeue != (uint16_t) NO_VAL) {
-		detail_ptr->requeue = job_specs->requeue;
-		info("update_job: setting requeue to %u for job_id %u",
-		     job_specs->requeue, job_specs->job_id);
+	if (job_specs->account) {
+		if (!IS_JOB_PENDING(job_ptr))
+			error_code = ESLURM_DISABLED;
+		else {
+			int rc = update_job_account("update_job", job_ptr, 
+						    job_specs->account);
+			if (rc != SLURM_SUCCESS)
+				error_code = rc;
+			else
+				update_accounting = true;
+
+		}
 	}
 
 	if (job_specs->partition) {
@@ -4400,6 +4460,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			info("update_job: setting partition to %s for "
 			     "job_id %u", job_specs->partition, 
 			     job_specs->job_id);
+			update_accounting = true;
 		} else {
 			error("Attempt to change partition for job %u",
 			      job_specs->job_id);
@@ -4465,13 +4526,6 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		}
 	}
 
-	if (job_specs->account) {
-		int rc = update_job_account("update_job", job_ptr, 
-					    job_specs->account);
-		if (rc != SLURM_SUCCESS)
-			error_code = rc;
-	}
-
 	if (job_specs->ntasks_per_node != (uint16_t) NO_VAL) {
 		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
 			error_code = ESLURM_DISABLED;
@@ -4502,9 +4556,10 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	}
 
 	if (job_specs->begin_time) {
-		if (IS_JOB_PENDING(job_ptr) && detail_ptr)
+		if (IS_JOB_PENDING(job_ptr) && detail_ptr) {
 			detail_ptr->begin_time = job_specs->begin_time;
-		else
+			update_accounting = true;
+		} else
 			error_code = ESLURM_DISABLED;
 	}
 
@@ -4682,7 +4737,13 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	 }
  }
 #endif
-
+        if(update_accounting) {
+		if (job_ptr->details && job_ptr->details->begin_time) {
+			/* Update job record in accounting to reflect changes */
+			jobacct_storage_g_job_start(
+				acct_db_conn, slurmctld_cluster_name, job_ptr);
+		}
+	}
 	return error_code;
 }
 
@@ -6002,6 +6063,8 @@ extern int job_cancel_by_assoc_id(uint32_t assoc_id)
 
 		info("Association deleted, cancelling job %u", 
 		     job_ptr->job_id);
+		/* make sure the assoc_mgr_association_lock isn't
+		   locked before this. */
 		job_signal(job_ptr->job_id, SIGKILL, 0, 0);
 		job_ptr->state_reason = FAIL_BANK_ACCOUNT;
 		cnt++;
@@ -6039,7 +6102,8 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 		     module, new_account, job_ptr->job_id);
 		return ESLURM_INVALID_ACCOUNT;
 	} else if(association_based_accounting 
-		  && !assoc_ptr && !accounting_enforce) {
+		  && !assoc_ptr 
+		  && !(accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)) {
 		/* if not enforcing associations we want to look for
 		   the default account and use it to avoid getting
 		   trash in the accounting records.
@@ -6049,11 +6113,12 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 					accounting_enforce, &assoc_ptr);
 		if(!assoc_ptr) {
 			debug("%s: we didn't have an association for account "
-			      "'%s' and user '%s', and we can't seem to find "
+			      "'%s' and user '%u', and we can't seem to find "
 			      "a default one either.  Keeping new account "
 			      "'%s'.  This will produce trash in accounting.  "
 			      "If this is not what you desire please put "
-			      "AccountStorageEnforce=1 in your slurm.conf "
+			      "AccountStorageEnforce=associations "
+			      "in your slurm.conf "
 			      "file.", module, new_account,
 			      job_ptr->user_id, new_account);
 			assoc_rec.acct = new_account;
@@ -6072,17 +6137,76 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 	job_ptr->assoc_id = assoc_rec.id;
 	job_ptr->assoc_ptr = (void *) assoc_ptr;
 
-	if (job_ptr->details && job_ptr->details->begin_time) {
-		/* Update account associated with the eligible time */
-		jobacct_storage_g_job_start(
-			acct_db_conn, slurmctld_cluster_name, job_ptr);
+	last_job_update = time(NULL);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Modify the account associated with a pending job
+ * IN module - where this is called from
+ * IN job_ptr - pointer to job which should be modified
+ * IN new_wckey - desired wckey name
+ * RET SLURM_SUCCESS or error code
+ */
+extern int update_job_wckey(char *module, struct job_record *job_ptr, 
+			    char *new_wckey)
+{
+	acct_wckey_rec_t wckey_rec, *wckey_ptr;
+
+	if ((!IS_JOB_PENDING(job_ptr)) || (job_ptr->details == NULL)) {
+		info("%s: attempt to modify account for non-pending "
+		     "job_id %u", module, job_ptr->job_id);
+		return ESLURM_DISABLED;
 	}
+
+	memset(&wckey_rec, 0, sizeof(acct_wckey_rec_t));
+	wckey_rec.uid       = job_ptr->user_id;
+	wckey_rec.name      = new_wckey;
+	if (assoc_mgr_fill_in_wckey(acct_db_conn, &wckey_rec,
+				    accounting_enforce, &wckey_ptr)) {
+		info("%s: invalid wckey %s for job_id %u",
+		     module, new_wckey, job_ptr->job_id);
+		return ESLURM_INVALID_WCKEY;
+	} else if(association_based_accounting 
+		  && !wckey_ptr 
+		  && !(accounting_enforce & ACCOUNTING_ENFORCE_WCKEYS)) {
+		/* if not enforcing associations we want to look for
+		   the default account and use it to avoid getting
+		   trash in the accounting records.
+		*/
+		wckey_rec.name = NULL;
+		assoc_mgr_fill_in_wckey(acct_db_conn, &wckey_rec,
+					accounting_enforce, &wckey_ptr);
+		if(!wckey_ptr) {
+			debug("%s: we didn't have a wckey record for wckey "
+			      "'%s' and user '%u', and we can't seem to find "
+			      "a default one either.  Setting it anyway. "
+			      "This will produce trash in accounting.  "
+			      "If this is not what you desire please put "
+			      "AccountStorageEnforce=wckeys in your slurm.conf "
+			      "file.", module, new_wckey,
+			      job_ptr->user_id, new_wckey);
+			wckey_rec.name = new_wckey;
+		}
+	}
+	
+	if (wckey_rec.name && wckey_rec.name[0] != '\0') {
+		xstrfmtcat(job_ptr->name, "\"%s", wckey_rec.name);
+		job_ptr->account = xstrdup(wckey_rec.name);
+		info("%s: setting wckey to %s for job_id %u",
+		     module, wckey_rec.name, job_ptr->job_id);
+	} else {
+		info("%s: cleared wckey for job_id %u",
+		     module, job_ptr->job_id);
+	}
+
 	last_job_update = time(NULL);
 
 	return SLURM_SUCCESS;
 }
 
-extern int send_jobs_to_accounting(time_t event_time)
+extern int send_jobs_to_accounting()
 {
 	ListIterator itr = NULL;
 	struct job_record *job_ptr;
@@ -6105,7 +6229,7 @@ extern int send_jobs_to_accounting(time_t event_time)
 						   accounting_enforce,
 						   (acct_association_rec_t **)
 						   &job_ptr->assoc_ptr) &&
-			   accounting_enforce 
+			   (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)
 			   && (!IS_JOB_FINISHED(job_ptr))) {
 				info("Cancelling job %u with "
 				     "invalid association",
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 3af4ad961..97b26dece 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -763,8 +763,10 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 	/* Clear dependencies on NULL or empty dependency input */
 	if ((new_depend == NULL) || (new_depend[0] == '\0')) {
 		xfree(job_ptr->details->dependency);
-		if (job_ptr->details->depend_list)
+		if (job_ptr->details->depend_list) {
 			list_destroy(job_ptr->details->depend_list);
+			job_ptr->details->depend_list = NULL;
+		}
 		return rc;
 
 	}
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 26684c6e6..7fb6d043d 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -996,8 +996,9 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		/* Leave job queued, something is hosed */
 		error("select_g_job_begin(%u): %m", job_ptr->job_id);
 		error_code = ESLURM_NODES_BUSY;
-		job_ptr->start_time = job_ptr->time_last_active 
-			= job_ptr->end_time = 0;
+		job_ptr->start_time = 0;
+		job_ptr->time_last_active = 0;
+		job_ptr->end_time = 0;
 		goto cleanup;
 	}
 
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index dc6b38291..c9cd63ec7 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -3005,8 +3005,7 @@ inline static void  _slurm_rpc_accounting_first_reg(slurm_msg_t *msg)
 		return;
 	}
 	
-	send_jobs_to_accounting(event_time);
-	send_nodes_to_accounting(event_time);
+	send_all_to_accounting(event_time);
 	
 	END_TIMER2("_slurm_rpc_accounting_first_reg");
 }
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index dd7e0bbab..195136685 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -1203,7 +1203,7 @@ static int _restore_job_dependencies(void)
 	assoc_mgr_clear_used_info();
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
-		if (accounting_enforce == ACCOUNTING_ENFORCE_WITH_LIMITS) {
+		if (accounting_enforce & ACCOUNTING_ENFORCE_LIMITS) {
 			if((job_ptr->job_state == JOB_RUNNING) ||
 			   (job_ptr->job_state == JOB_SUSPENDED))
 				acct_policy_job_begin(job_ptr);
diff --git a/src/slurmctld/sched_plugin.c b/src/slurmctld/sched_plugin.c
index 187143b36..4f86d83ab 100644
--- a/src/slurmctld/sched_plugin.c
+++ b/src/slurmctld/sched_plugin.c
@@ -181,13 +181,14 @@ slurm_sched_context_create( const char *sched_type )
 static int
 slurm_sched_context_destroy( slurm_sched_context_t *c )
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -196,7 +197,7 @@ slurm_sched_context_destroy( slurm_sched_context_t *c )
 	xfree( c->sched_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 089beaa7c..bb1cd87b2 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -1335,10 +1335,13 @@ extern void run_health_check(void);
 /* save_all_state - save entire slurmctld state for later recovery */
 extern void save_all_state(void);
 
+/* send all info for the controller to accounting */
+extern void send_all_to_accounting(time_t event_time);
+
 /* sends all jobs in eligible state to accounting.  Only needed at
  * first registration
  */
-extern int send_jobs_to_accounting(time_t event_time);
+extern int send_jobs_to_accounting();
 
 /* send all nodes in a down like state to accounting.  Only needed at
  * first registration
@@ -1481,6 +1484,16 @@ extern int update_job (job_desc_msg_t * job_specs, uid_t uid);
 extern int update_job_account(char *module, struct job_record *job_ptr, 
 			      char *new_account);
 
+/*
+ * Modify the wckey associated with a pending job
+ * IN module - where this is called from
+ * IN job_ptr - pointer to job which should be modified
+ * IN new_wckey - desired wckey name
+ * RET SLURM_SUCCESS or error code
+ */
+extern int update_job_wckey(char *module, struct job_record *job_ptr, 
+			    char *new_wckey);
+
 /* Reset nodes_completing field for all jobs */
 extern void update_job_nodes_completing(void);
 
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index a130a5783..d796ebd2e 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  step_mgr.c - manage the job step information of slurm
- *  $Id: step_mgr.c 15827 2008-12-04 20:17:23Z jette $
+ *  $Id: step_mgr.c 16584 2009-02-18 19:03:40Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -1100,7 +1100,7 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 					error("step_layout_create exclusive");
 					return NULL;
 				}
-				usable_cpus = MAX(usable_cpus, 
+				usable_cpus = MIN(usable_cpus, 
 						  (num_tasks - set_cpus));
 			} else
 				usable_cpus = job_ptr->alloc_lps[pos];
diff --git a/src/slurmctld/trigger_mgr.c b/src/slurmctld/trigger_mgr.c
index 00d112a67..f9ae9f399 100644
--- a/src/slurmctld/trigger_mgr.c
+++ b/src/slurmctld/trigger_mgr.c
@@ -977,6 +977,8 @@ static void _trigger_run_program(trig_mgr_info_t *trig_in)
 		trig_in->group_id = child;
 	} else if (child == 0) {
 		int i;
+		bool run_as_self = (uid == getuid());
+
 		for (i=0; i<128; i++)
 			close(i);
 #ifdef SETPGRP_TWO_ARGS
@@ -985,9 +987,18 @@ static void _trigger_run_program(trig_mgr_info_t *trig_in)
 		setpgrp();
 #endif
 		setsid();
-		setuid(uid);
-		setgid(gid);
-		initgroups(user_name, -1);
+		if ((initgroups(user_name, gid) == -1) && !run_as_self) {
+			error("trigger: initgroups: %m");
+			exit(1);
+		}
+		if ((setgid(gid) == -1) && !run_as_self){
+			error("trigger: setgid: %m");
+			exit(1);
+		}
+		if ((setuid(uid) == -1) && !run_as_self) {
+			error("trigger: setuid: %m");
+			exit(1);
+		}
 		execl(program, arg0, arg1, NULL);
 		exit(1);
 	} else
diff --git a/src/slurmd/common/proctrack.c b/src/slurmd/common/proctrack.c
index 4e3d33003..ec47637f3 100644
--- a/src/slurmd/common/proctrack.c
+++ b/src/slurmd/common/proctrack.c
@@ -173,13 +173,15 @@ _proctrack_context_create( const char *proctrack_type )
 static int
 _proctrack_context_destroy( slurm_proctrack_context_t *c )
 {
+	int rc = SLURM_SUCCESS;
+
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -188,7 +190,7 @@ _proctrack_context_destroy( slurm_proctrack_context_t *c )
 	xfree( c->proctrack_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 
diff --git a/src/slurmd/common/task_plugin.c b/src/slurmd/common/task_plugin.c
index 3841edb68..63ec388a6 100644
--- a/src/slurmd/common/task_plugin.c
+++ b/src/slurmd/common/task_plugin.c
@@ -158,13 +158,14 @@ _slurmd_task_context_create(const char *task_plugin_type)
 static int
 _slurmd_task_context_destroy(slurmd_task_context_t *c)
 {
+	int rc = SLURM_SUCCESS;
 	/*
 	 * Must check return code here because plugins might still
 	 * be loaded and active.
 	 */
 	if ( c->plugin_list ) {
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
 		}
 	} else {
 		plugin_unload(c->cur_plugin);
@@ -173,7 +174,7 @@ _slurmd_task_context_destroy(slurmd_task_context_t *c)
 	xfree( c->task_type );
 	xfree( c );
 
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index bf968b8c8..cadf2417e 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -108,6 +108,7 @@ static bool _slurm_authorized_user(uid_t uid);
 static void _job_limits_free(void *x);
 static int  _job_limits_match(void *x, void *key);
 static bool _job_still_running(uint32_t job_id);
+static int  _init_groups(uid_t my_uid, gid_t my_gid);
 static int  _kill_all_active_steps(uint32_t jobid, int sig, bool batch);
 static int  _terminate_all_steps(uint32_t jobid, bool batch);
 static void _rpc_launch_tasks(slurm_msg_t *);
@@ -1835,13 +1836,35 @@ static void  _rpc_pid2jid(slurm_msg_t *msg)
 	}
 }
 
+static int
+_init_groups(uid_t my_uid, gid_t my_gid)
+{
+	char *user_name = uid_to_string(my_uid);
+	int rc;
+
+	if (user_name == NULL) {
+		error("sbcast: Could not find uid %ld", (long)my_uid);
+		return -1;
+	}
+
+	rc = initgroups(user_name, my_gid);
+	if (rc) {
+		error("sbcast: Error in initgroups(%s, %ld): %m",
+		      user_name, (long)my_gid);
+		xfree(user_name);
+		return -1;
+	}
+	xfree(user_name);
+	return 0;
+}
+
 static int
 _rpc_file_bcast(slurm_msg_t *msg)
 {
 	file_bcast_msg_t *req = msg->data;
 	int fd, flags, offset, inx, rc;
 	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
-	uid_t req_gid = g_slurm_auth_get_gid(msg->auth_cred, NULL);
+	gid_t req_gid = g_slurm_auth_get_gid(msg->auth_cred, NULL);
 	pid_t child;
 
 #if 0
@@ -1867,6 +1890,10 @@ _rpc_file_bcast(slurm_msg_t *msg)
 
 	/* The child actually performs the I/O and exits with 
 	 * a return code, do not return! */
+	if (_init_groups(req_uid, req_gid) < 0) {
+		error("sbcast: initgroups(%u): %m", req_uid);
+		exit(errno);
+	}
 	if (setgid(req_gid) < 0) {
 		error("sbcast: uid:%u setgid(%u): %s", req_uid, req_gid, 
 			strerror(errno));
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c
index dfd5e2619..600ace4bb 100644
--- a/src/slurmd/slurmd/slurmd.c
+++ b/src/slurmd/slurmd/slurmd.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  src/slurmd/slurmd/slurmd.c - main slurm node server daemon
- *  $Id: slurmd.c 15572 2008-11-03 23:14:27Z jette $
+ *  $Id: slurmd.c 17177 2009-04-07 18:09:43Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -43,6 +43,7 @@
 #endif
 
 #include <fcntl.h>
+#include <grp.h>
 #include <string.h>
 #include <stdlib.h>
 #include <pthread.h>
@@ -159,6 +160,18 @@ main (int argc, char *argv[])
 	for (i=3; i<256; i++)
 		(void) close(i);
 
+	/*
+	 * Drop supplementary groups.
+	 */
+	if (geteuid() == 0) {
+		if (setgroups(0, NULL) != 0) {
+			fatal("Failed to drop supplementary groups, "
+			      "setgroups: %m");
+		}
+	} else {
+		info("Not running as root. Can't drop supplementary groups");
+	}
+
 	/*
 	 * Create and set default values for the slurmd global
 	 * config variable "conf"
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index bb6b53623..3ad0e1d36 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  src/slurmd/slurmstepd/mgr.c - job manager functions for slurmstepd
- *  $Id: mgr.c 15835 2008-12-04 23:59:29Z jette $
+ *  $Id: mgr.c 17040 2009-03-26 15:03:18Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -949,6 +949,7 @@ _fork_all_tasks(slurmd_job_t *job)
 			goto fail2;
 		} else if (pid == 0)  { /* child */
 			int j;
+
 #ifdef HAVE_AIX
 			(void) mkcrid(0);
 #endif
diff --git a/src/slurmd/slurmstepd/slurmstepd.c b/src/slurmd/slurmstepd/slurmstepd.c
index 1fe141065..3142bd71c 100644
--- a/src/slurmd/slurmstepd/slurmstepd.c
+++ b/src/slurmd/slurmstepd/slurmstepd.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  src/slurmd/slurmstepd/slurmstepd.c - SLURM job-step manager.
- *  $Id: slurmstepd.c 15819 2008-12-03 23:32:14Z jette $
+ *  $Id: slurmstepd.c 17040 2009-03-26 15:03:18Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -361,7 +361,7 @@ _step_setup(slurm_addr *cli, slurm_addr *self, slurm_msg_t *msg)
 	return job;
 }
 
-#ifdef MEMORY_LEAK_TEST
+#ifdef MEMORY_LEAK_DEBUG
 static void
 _step_cleanup(slurmd_job_t *job, slurm_msg_t *msg, int rc)
 {
diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c
index cf666b54e..dc0a6f1e3 100644
--- a/src/slurmd/slurmstepd/task.c
+++ b/src/slurmd/slurmstepd/task.c
@@ -98,6 +98,7 @@
  * Static prototype definitions.
  */
 static void  _make_tmpdir(slurmd_job_t *job);
+static void  _print_stdout(char *buf);
 static int   _run_script_and_set_env(const char *name, const char *path, 
 				     slurmd_job_t *job);
 static void  _update_env(char *buf, char ***env);
@@ -105,8 +106,7 @@ static char *_uint32_array_to_str(int array_len, const uint32_t *array);
 
 /* Search for "export NAME=value" records in buf and 
  * use them to add environment variables to env */
-static void
-_update_env(char *buf, char ***env)
+static void _update_env(char *buf, char ***env)
 {
 	char *tmp_ptr, *name_ptr, *val_ptr, *buf_ptr = buf;
 
@@ -132,7 +132,32 @@ _update_env(char *buf, char ***env)
 		}
 		debug("name:%s:val:%s:",name_ptr,val_ptr);
 		if (setenvf(env, name_ptr, "%s", val_ptr))
-			error("Unable to set %s environment variable", name_ptr);
+			error("Unable to set %s environment variable", 
+			      name_ptr);
+	}		
+}
+
+/* Search for "print <whatever>" records in buf and 
+ * write that to the job's stdout */
+static void _print_stdout(char *buf)
+{
+	char *tmp_ptr, *buf_ptr = buf;
+
+	while ((tmp_ptr = strstr(buf_ptr, "print "))) {
+		if ((tmp_ptr != buf_ptr) && (tmp_ptr[-1] != '\n')) {
+			/* Skip "print " if not at start of a line */
+			buf_ptr +=6;
+			continue;
+		}
+		buf_ptr = tmp_ptr + 6;
+		tmp_ptr = strchr(buf_ptr, '\n');
+		if (tmp_ptr) {
+			write(1, buf_ptr, (tmp_ptr - buf_ptr + 1));
+			buf_ptr = tmp_ptr + 1;
+		} else {
+			write(1, buf_ptr, strlen(buf_ptr));
+			break;
+		}
 	}		
 }
 
@@ -196,6 +221,7 @@ _run_script_and_set_env(const char *name, const char *path, slurmd_job_t *job)
 		buf[nread] = 0;
 		//debug("read %d:%s:", nread, buf);
 		_update_env(buf, &job->env);
+		_print_stdout(buf);
 	}
 
 	close(pfd[0]);
diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c
index f5784e9f3..4fba01334 100644
--- a/src/slurmdbd/proc_req.c
+++ b/src/slurmdbd/proc_req.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  proc_req.c - functions for processing incoming RPCs.
  *****************************************************************************
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  LLNL-CODE-402394.
@@ -803,7 +803,12 @@ static int _archive_dump(slurmdbd_conn_t *slurmdbd_conn,
 		arch_cond->step_purge = slurmdbd_conf->step_purge;
 
 	rc = jobacct_storage_g_archive(slurmdbd_conn->db_conn, arch_cond);
-
+	if(rc != SLURM_SUCCESS) {
+		if(errno == EACCES) 
+			comment = "Problem accessing file.";
+		else
+			comment = "Error with request.";
+	}
 end_it:
 	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
 			       DBD_ARCHIVE_DUMP, get_msg);
@@ -839,7 +844,10 @@ static int _archive_load(slurmdbd_conn_t *slurmdbd_conn,
 	}
 	
 	rc = jobacct_storage_g_archive_load(slurmdbd_conn->db_conn, arch_rec);
-
+	if(rc == ENOENT) 
+		comment = "No archive file given to recover.";
+	else if(rc != SLURM_SUCCESS)
+		comment = "Error with request.";
 end_it:
 	destroy_acct_archive_rec(arch_rec);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
diff --git a/src/slurmdbd/read_config.c b/src/slurmdbd/read_config.c
index d7acc2d67..aaa1c213d 100644
--- a/src/slurmdbd/read_config.c
+++ b/src/slurmdbd/read_config.c
@@ -114,8 +114,8 @@ static void _clear_slurmdbd_conf(void)
 extern int read_slurmdbd_conf(void)
 {
 	s_p_options_t options[] = {
-		{"ArchiveJobs", S_P_BOOLEAN},
 		{"ArchiveDir", S_P_STRING},
+		{"ArchiveJobs", S_P_BOOLEAN},
 		{"ArchiveScript", S_P_STRING},
 		{"ArchiveSteps", S_P_BOOLEAN},
 		{"AuthInfo", S_P_STRING},
diff --git a/src/slurmdbd/rpc_mgr.c b/src/slurmdbd/rpc_mgr.c
index adba87fb7..c487f3d00 100644
--- a/src/slurmdbd/rpc_mgr.c
+++ b/src/slurmdbd/rpc_mgr.c
@@ -222,7 +222,7 @@ static void * _service_connection(void *arg)
 			rc = proc_req(
 				conn, msg, msg_size, first, &buffer, &uid);
 			first = false;
-			if (rc != SLURM_SUCCESS) {
+			if (rc != SLURM_SUCCESS && rc != ACCOUNTING_FIRST_REG) {
 				error("Processing last message from "
 				      "connection %d(%s) uid(%d)",
 				      conn->newsockfd, conn->ip, uid);
diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c
index fca5cf5ab..7e0807f7a 100644
--- a/src/smap/configure_functions.c
+++ b/src/smap/configure_functions.c
@@ -98,9 +98,12 @@ static allocated_block_t *_make_request(ba_request_t *request)
 			  request->geometry[2]);
 		return NULL;
 	} else {
-		if(request->passthrough)
+		char *pass = ba_passthroughs_string(request->deny_pass);
+		if(pass) {
 			sprintf(error_string,"THERE ARE PASSTHROUGHS IN "
-				"THIS ALLOCATION!!!!!!!");
+				"THIS ALLOCATION DIM %s!!!!!!!", pass);
+			xfree(pass);
+		}
 		
 		allocated_block = (allocated_block_t *)xmalloc(
 			sizeof(allocated_block_t));
@@ -204,7 +207,9 @@ static int _create_allocation(char *com, List allocated_blocks)
 	allocated_block_t *allocated_block = NULL;
 	ba_request_t *request = (ba_request_t*) xmalloc(sizeof(ba_request_t)); 
 	int diff=0;
-
+#ifndef HAVE_BGL
+	int small16=-1, small64=-1, small256=-1;
+#endif
 	request->geometry[0] = (uint16_t)NO_VAL;
 	request->conn_type=SELECT_TORUS;
 	request->rotate = false;
@@ -213,7 +218,7 @@ static int _create_allocation(char *com, List allocated_blocks)
 	request->size = 0;
 	request->small32 = 0;
 	request->small128 = 0;
-	request->passthrough = false;
+	request->deny_pass = 0;
 	request->avail_node_bitmap = NULL;
 
 	while(i<len) {				
@@ -223,12 +228,28 @@ static int _create_allocation(char *com, List allocated_blocks)
 		} else if(!strncasecmp(com+i, "small", 5)) {
 			request->conn_type = SELECT_SMALL;
 			i+=5;
+		} else if(!strncasecmp(com+i, "deny", 4)) {
+			i+=4;
+			if(strstr(com+i, "X")) 
+				request->deny_pass |= PASS_DENY_X;
+			if(strstr(com+i, "Y")) 
+				request->deny_pass |= PASS_DENY_Y;
+			if(strstr(com+i, "Z")) 
+				request->deny_pass |= PASS_DENY_Z;
+			if(!strcasecmp(com+i, "ALL")) 
+				request->deny_pass |= PASS_DENY_ALL;
 		} else if(!strncasecmp(com+i, "nodecard", 8)) {
 			small32=0;
-			i+=5;
+			i+=8;
 		} else if(!strncasecmp(com+i, "quarter", 7)) {
 			small128=0;
-			i+=6;
+			i+=7;
+		} else if(!strncasecmp(com+i, "32CN", 4)) {
+			small32=0;
+			i+=4;
+		} else if(!strncasecmp(com+i, "128CN", 5)) {
+			small128=0;
+			i+=5;
 		} else if(!strncasecmp(com+i, "rotate", 6)) {
 			request->rotate=true;
 			i+=6;
@@ -244,13 +265,35 @@ static int _create_allocation(char *com, List allocated_blocks)
 			      || (com[i] >= 'A' && com[i] <= 'Z'))) {
 			starti=i;
 			i++;
-		} else if(small32 == 0 && (com[i] < 58 && com[i] > 47)) {
+		} else if(small32 == 0 && (com[i] >= '0' && com[i] <= '9')) {
 			small32=i;
 			i++;
-		} else if(small128 == 0 && (com[i] < 58 && com[i] > 47)) {
+		} else if(small128 == 0 && (com[i] >= '0' && com[i] <= '9')) {
 			small128=i;
 			i++;
-		} else if(geoi<0 && ((com[i] >= '0' && com[i] <= '9')
+		} 
+#ifndef HAVE_BGL
+		else if(!strncasecmp(com+i, "16CN", 4)) {
+			small16=0;
+			i+=4;
+		} else if(!strncasecmp(com+i, "64CN", 4)) {
+			small64=0;
+			i+=4;
+		} else if(!strncasecmp(com+i, "256CN", 5)) {
+			small256=0;
+			i+=5;
+		} else if(small16 == 0 && (com[i] >= '0' && com[i] <= '9')) {
+			small16=i;
+			i++;
+		} else if(small64 == 0 && (com[i] >= '0' && com[i] <= '9')) {
+			small64=i;
+			i++;
+		} else if(small256 == 0 && (com[i] >= '0' && com[i] <= '9')) {
+			small256=i;
+			i++;
+		} 
+#endif
+		else if(geoi<0 && ((com[i] >= '0' && com[i] <= '9')
 				     || (com[i] >= 'A' && com[i] <= 'Z'))) {
 			geoi=i;
 			i++;
@@ -261,36 +304,79 @@ static int _create_allocation(char *com, List allocated_blocks)
 	}		
 	
 	if(request->conn_type == SELECT_SMALL) {
+		int total = 512;
+#ifndef HAVE_BGL
+		if(small16 > 0) {
+			request->small16 = atoi(&com[small16]);
+			total -= request->small16 * 16;
+		}
+
+		if(small64 > 0) {
+			request->small64 = atoi(&com[small64]);
+			total -= request->small64 * 64;
+		}
+
+		if(small256 > 0) {
+			request->small256 = atoi(&com[small256]);
+			total -= request->small256 * 256;
+		}
+#endif
+
 		if(small32 > 0) {
 			request->small32 = atoi(&com[small32]);
-			small32 = request->small32/4;
-			request->small32 = small32*4;
+			total -= request->small32 * 32;
 		}
 
-		request->small128 = 4;
-		
-		if(request->small32 > 0)
-			request->small128 -= small32;
-
-		if(request->small128 > 4) {
-			request->small128 = 4;
-			request->small32 = 0;
-		} else if(request->small32 > 16) {
-			request->small128 = 0;
-			request->small32 = 16;
+		if(small128 > 0) {
+			request->small128 = atoi(&com[small128]);
+			total -= request->small128 * 128;
 		}
-		
-		small128 = request->small128*4;
-		small32 = request->small32;
-		if((small128+small32) > 16) {
+		if(total < 0) {
 			sprintf(error_string, 
-				"please specify a complete split of a "
-				"Base Partion\n"
-				"(i.e. small32=4)");
+				"You asked for %d more nodes than "
+				"are in a Midplane\n", total * 2);
 			geoi = -1;
+
+		} 
+
+#ifndef HAVE_BGL
+		while(total > 0) {
+			if(total >= 256) {
+				request->small256++;
+				total -= 256;
+			} else if(total >= 128) {
+				request->small128++;
+				total -= 128;
+			} else if(total >= 64) {
+				request->small64++;
+				total -= 64;
+			} else if(total >= 32) {
+				request->small32++;
+				total -= 32;
+			} else if(total >= 16) {
+				request->small16++;
+				total -= 16;
+			} else
+				break;
 		}
+#else
+		while(total > 0) {
+			if(total >= 128) {
+				request->small128++;
+				total -= 128;
+			} else if(total >= 32) {
+				request->small32++;
+				total -= 32;
+			} else
+				break;
+		}
+#endif
 		request->size = 1;
-				
+/* 		sprintf(error_string, */
+/* 			"got %d %d %d %d %d %d", */
+/* 			total, request->small16, request->small32, */
+/* 			request->small64, request->small128, */
+/* 			request->small256); */
 	}
 
 	if(geoi<0 && !request->size) {
@@ -858,6 +944,12 @@ static int _copy_allocation(char *com, List allocated_blocks)
 		request->conn_type=allocated_block->request->conn_type;
 		request->rotate =allocated_block->request->rotate;
 		request->elongate = allocated_block->request->elongate;
+		request->deny_pass = allocated_block->request->deny_pass;
+#ifndef HAVE_BGL
+		request->small16 = allocated_block->request->small16;
+		request->small64 = allocated_block->request->small64;
+		request->small256 = allocated_block->request->small256;
+#endif
 		request->small32 = allocated_block->request->small32;
 		request->small128 = allocated_block->request->small128;
 				
@@ -961,9 +1053,23 @@ static int _save_allocation(char *com, List allocated_blocks)
 				conn_type = "MESH";
 			else {
 				conn_type = "SMALL";
-				xstrfmtcat(extra, " NodeCards=%d Quarters=%d",
+#ifndef HAVE_BGL
+				xstrfmtcat(extra, 
+					   " 16CNBlocks=%d 32CNBlocks=%d "
+					   "64CNBlocks=%d 128CNBlocks=%d "
+					   "256CNBlocks=%d",
+					   allocated_block->request->small16,
+					   allocated_block->request->small32,
+					   allocated_block->request->small64,
+					   allocated_block->request->small128,
+					   allocated_block->request->small256);
+#else
+				xstrfmtcat(extra, 
+					   " 32CNBlocks=%d 128CNBlocks=%d",
 					   allocated_block->request->small32,
 					   allocated_block->request->small128);
+
+#endif
 			}
 			xstrfmtcat(save_string, "BPs=%s Type=%s", 
 				   allocated_block->request->save_name, 
@@ -1226,12 +1332,28 @@ static void _print_header_command(void)
 		  main_xcord, "NODES");
 #endif
 	main_xcord += 10;
+
+#ifndef HAVE_BGL
+	mvwprintw(text_win, main_ycord,
+		  main_xcord, "16CN");
+	main_xcord += 5;
+#endif
 	mvwprintw(text_win, main_ycord,
-		  main_xcord, "NODECARDS");
-	main_xcord += 11;
+		  main_xcord, "32CN");
+	main_xcord += 5;
+#ifndef HAVE_BGL
 	mvwprintw(text_win, main_ycord,
-		  main_xcord, "QUARTERS");
-	main_xcord += 10;
+		  main_xcord, "64CN");
+	main_xcord += 5;
+#endif
+	mvwprintw(text_win, main_ycord,
+		  main_xcord, "128CN");
+	main_xcord += 6;
+#ifndef HAVE_BGL
+	mvwprintw(text_win, main_ycord,
+		  main_xcord, "256CN");
+	main_xcord += 6;
+#endif
 #ifdef HAVE_BG
 	mvwprintw(text_win, main_ycord,
 		  main_xcord, "BP_LIST");
@@ -1249,7 +1371,7 @@ static void _print_text_command(allocated_block_t *allocated_block)
 		COLOR_PAIR(allocated_block->color));
 			
 	mvwprintw(text_win, main_ycord,
-		  main_xcord, "%c",allocated_block->letter);
+		  main_xcord, "%c", allocated_block->letter);
 	main_xcord += 4;
 	if(allocated_block->request->conn_type==SELECT_TORUS) 
 		mvwprintw(text_win, main_ycord,
@@ -1283,18 +1405,40 @@ static void _print_text_command(allocated_block_t *allocated_block)
 	main_xcord += 10;
 	
 	if(allocated_block->request->conn_type == SELECT_SMALL) {
+#ifndef HAVE_BGL
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%d", 
+			  allocated_block->request->small16);
+		main_xcord += 5;
+#endif
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%d", 
 			  allocated_block->request->small32);
-		main_xcord += 11;
+		main_xcord += 5;
+#ifndef HAVE_BGL
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%d", 
+			  allocated_block->request->small64);
+		main_xcord += 5;
+#endif
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%d", 
 			  allocated_block->request->small128);
-		main_xcord += 10;
+		main_xcord += 6;
+#ifndef HAVE_BGL
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%d", 
+			  allocated_block->request->small256);
+		main_xcord += 6;
+#endif
 		
 	} else
-		main_xcord += 21;
-	
+#ifndef HAVE_BGL
+		main_xcord += 27;
+#else
+		main_xcord += 11;
+#endif	
+
 	mvwprintw(text_win, main_ycord,
 		  main_xcord, "%s",
 		  allocated_block->request->save_name);
@@ -1393,7 +1537,9 @@ void get_command(void)
 			_delete_allocated_blocks(allocated_blocks);
 			ba_fini();
 			exit(0);
-		} if (!strcmp(com, "quit")) {
+		} 
+		
+		if (!strcmp(com, "quit") || !strcmp(com, "\\q")) {
 			break;
 		} else if (!strncasecmp(com, "layout", 6)) {
 			_set_layout(com);
diff --git a/src/smap/job_functions.c b/src/smap/job_functions.c
index 602b9f6c0..459ed82b1 100644
--- a/src/smap/job_functions.c
+++ b/src/smap/job_functions.c
@@ -195,7 +195,7 @@ static void _print_header_job(void)
 		main_xcord += 3;
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "JOBID");
-		main_xcord += 6;
+		main_xcord += 8;
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "PARTITION");
 		main_xcord += 10;
@@ -297,7 +297,7 @@ static int _print_text_job(job_info_t * job_ptr)
 		main_xcord += 3;
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%d", job_ptr->job_id);
-		main_xcord += 6;
+		main_xcord += 8;
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%.10s", job_ptr->partition);
 		main_xcord += 10;
@@ -323,7 +323,7 @@ static int _print_text_job(job_info_t * job_ptr)
 			  job_state_string_compact(job_ptr->job_state));
 		main_xcord += 2;
 		if(!strcasecmp(job_ptr->nodes,"waiting...")) {
-			sprintf(time_buf,"0:00:00");
+			sprintf(time_buf,"00:00:00");
 		} else {
 			time_diff = now_time - job_ptr->start_time;
 			secs2time_str(time_diff, time_buf, sizeof(time_buf));
@@ -375,7 +375,7 @@ static int _print_text_job(job_info_t * job_ptr)
 		main_xcord = 1;
 		main_ycord++;
 	} else {
-		printf("%5d ", job_ptr->job_id);
+		printf("%8d ", job_ptr->job_id);
 		printf("%9.9s ", job_ptr->partition);
 #ifdef HAVE_BG
 		printf("%16.16s ", 
@@ -391,7 +391,7 @@ static int _print_text_job(job_info_t * job_ptr)
 		printf("%2.2s ",
 		       job_state_string_compact(job_ptr->job_state));
 		if(!strcasecmp(job_ptr->nodes,"waiting...")) {
-			sprintf(time_buf,"0:00:00");
+			sprintf(time_buf,"00:00:00");
 		} else {
 			time_diff = now_time - job_ptr->start_time;
 			secs2time_str(time_diff, time_buf, sizeof(time_buf));
diff --git a/src/smap/partition_functions.c b/src/smap/partition_functions.c
index 84567c433..7850adf62 100644
--- a/src/smap/partition_functions.c
+++ b/src/smap/partition_functions.c
@@ -944,6 +944,7 @@ static int _make_nodelist(char *nodes, List nodelist)
 
 static char* _convert_conn_type(enum connection_type conn_type)
 {
+#ifdef HAVE_BG
 	switch (conn_type) {
 	case (SELECT_MESH):
 		return "MESH";
@@ -953,7 +954,24 @@ static char* _convert_conn_type(enum connection_type conn_type)
 		return "SMALL";
 	case (SELECT_NAV):
 		return "NAV";
+#ifndef HAVE_BGL
+	case SELECT_HTC_S:
+		return "HTC_S";
+		break;
+	case SELECT_HTC_D:
+		return "HTC_D";
+		break;
+	case SELECT_HTC_V:
+		return "HTC_V";
+		break;
+	case SELECT_HTC_L:
+		return "HTC_L";
+		break;
+#endif
+	default:
+		return "?";
 	}
+#endif
 	return "?";
 }
 
diff --git a/src/smap/smap.c b/src/smap/smap.c
index b06662999..0516274e8 100644
--- a/src/smap/smap.c
+++ b/src/smap/smap.c
@@ -182,7 +182,7 @@ part_fini:
 			      COLS, 
 			      LINES);
 			ba_fini();
-			exit(0);
+			exit(1);
 		}
 		
 		raw();
@@ -252,7 +252,7 @@ part_fini:
 			if(!params.commandline)
 				endwin();
 			ba_fini();
-			exit(0);
+			exit(1);
 			break;
 #endif
 		}
diff --git a/src/squeue/opts.c b/src/squeue/opts.c
index 0602cb805..1d38df6c7 100644
--- a/src/squeue/opts.c
+++ b/src/squeue/opts.c
@@ -1,7 +1,7 @@
 /****************************************************************************\
  *  opts.c - srun command line option parsing
  *
- *  $Id: opts.c 15808 2008-12-02 23:38:47Z da $
+ *  $Id: opts.c 16350 2009-01-29 18:16:08Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -68,7 +68,7 @@
 
 /* FUNCTIONS */
 static List  _build_job_list( char* str );
-static List  _build_part_list( char* str );
+static List  _build_str_list( char* str );
 static List  _build_state_list( char* str );
 static List  _build_all_states_list( void );
 static List  _build_step_list( char* str );
@@ -107,6 +107,7 @@ parse_command_line( int argc, char* argv[] )
 		{"states",     required_argument, 0, 't'},
 		{"user",       required_argument, 0, 'u'},
 		{"users",      required_argument, 0, 'u'},
+		{"account",    required_argument, 0, 'U'},
 		{"verbose",    no_argument,       0, 'v'},
 		{"version",    no_argument,       0, 'V'},
 		{"help",       no_argument,       0, OPT_LONG_HELP},
@@ -122,7 +123,7 @@ parse_command_line( int argc, char* argv[] )
 	if ( ( env_val = getenv("SQUEUE_SORT") ) )
 		params.sort = xstrdup(env_val);
 
-	while((opt_char = getopt_long(argc, argv, "ahi:j::ln:o:p:s::S:t:u:vV",
+	while((opt_char = getopt_long(argc, argv, "ahi:j::ln:o:p:s::S:t:u:U:vV",
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 			case (int)'?':
@@ -172,7 +173,13 @@ parse_command_line( int argc, char* argv[] )
 				xfree(params.partitions);
 				params.partitions = xstrdup(optarg);
 				params.part_list = 
-					_build_part_list( params.partitions );
+					_build_str_list( params.partitions );
+				break;
+			case (int) 'U':
+				xfree(params.accounts);
+				params.accounts = xstrdup(optarg);
+				params.account_list = 
+					_build_str_list( params.accounts );
 				break;
 			case (int) 's':
 				if (optarg) {
@@ -271,7 +278,13 @@ parse_command_line( int argc, char* argv[] )
 	if ( ( params.partitions == NULL ) && 
 	     ( env_val = getenv("SQUEUE_PARTITION") ) ) {
 		params.partitions = xstrdup(env_val);
-		params.part_list = _build_part_list( params.partitions );
+		params.part_list = _build_str_list( params.partitions );
+	}
+
+	if ( ( params.accounts == NULL ) && 
+	     ( env_val = getenv("SQUEUE_ACCOUNT") ) ) {
+		params.accounts = xstrdup(env_val);
+		params.account_list = _build_str_list( params.accounts );
 	}
 
 	if ( ( params.states == NULL ) && 
@@ -838,12 +851,12 @@ _build_job_list( char* str )
 }
 
 /*
- * _build_part_list- build a list of partition names
- * IN str - comma separated list of partition names
- * RET List of partition names
+ * _build_str_list- build a list of strings
+ * IN str - comma separated list of strings
+ * RET List of strings
  */
 static List 
-_build_part_list( char* str )
+_build_str_list( char* str )
 {
 	List my_list;
 	char *part = NULL, *tmp_char = NULL, *my_part_list = NULL;
diff --git a/src/squeue/print.c b/src/squeue/print.c
index 3680e7a0e..2d86c61e9 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -653,7 +653,9 @@ int _print_job_num_procs(job_info_t * job, int width, bool right, char* suffix)
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("CPUS", width, right, true);
 	else {
-		if (job->job_state == JOB_RUNNING) {
+		if ((job->num_cpu_groups > 0) &&
+		    (job->cpus_per_node) &&
+		    (job->cpu_count_reps)) {
 			uint32_t cnt = 0, i;
 			for (i=0; i<job->num_cpu_groups; i++) {
 				cnt += job->cpus_per_node[i] * 
@@ -1273,7 +1275,7 @@ static int _filter_job(job_info_t * job)
 	ListIterator iterator;
 	uint32_t *job_id, *user;
 	enum job_states *state_id;
-	char *part;
+	char *part, *account;
 
 	if (params.job_list) {
 		filter = 1;
@@ -1302,6 +1304,21 @@ static int _filter_job(job_info_t * job)
 		if (filter == 1)
 			return 2;
 	}
+	
+	if (params.account_list) {
+		filter = 1;
+		iterator = list_iterator_create(params.account_list);
+		while ((account = list_next(iterator))) {
+			 if ((job->account != NULL) &&
+			     (strcmp(account, job->account) == 0)) {
+				filter = 0;
+				break;
+			}
+		}
+		list_iterator_destroy(iterator);
+		if (filter == 1)
+			return 2;
+	}
 
 	if (params.state_list) {
 		filter = 1;
diff --git a/src/squeue/sort.c b/src/squeue/sort.c
index ad66c2dcd..cc52e0b3b 100644
--- a/src/squeue/sort.c
+++ b/src/squeue/sort.c
@@ -259,8 +259,13 @@ static int _sort_job_by_name(void *void1, void *void2)
 	int diff;
 	job_info_t *job1 = (job_info_t *) void1;
 	job_info_t *job2 = (job_info_t *) void2;
+	char *val1 = "", *val2 = "";
 
-	diff = strcmp(job1->name, job2->name);
+	if (job1->name)
+		val1 = job1->name;
+	if (job2->name)
+		val2 = job2->name;
+	diff = strcmp(val1, val2);
 
 	if (reverse_order)
 		diff = -diff;
@@ -565,8 +570,13 @@ static int _sort_job_by_partition(void *void1, void *void2)
 	int diff;
 	job_info_t *job1 = (job_info_t *) void1;
 	job_info_t *job2 = (job_info_t *) void2;
+	char *val1 = "", *val2 = "";
 
-	diff = strcmp(job1->partition, job2->partition);
+	if (job1->partition)
+		val1 = job1->partition;
+	if (job2->partition)
+		val2 = job2->partition;
+	diff = strcmp(val1, val2);
 
 	if (reverse_order)
 		diff = -diff;
@@ -696,8 +706,13 @@ static int _sort_step_by_partition(void *void1, void *void2)
 	int diff;
 	job_step_info_t *step1 = (job_step_info_t *) void1;
 	job_step_info_t *step2 = (job_step_info_t *) void2;
+	char *val1 = "", *val2 = "";
 
-	diff = strcmp(step1->partition, step2->partition);
+	if (step1->partition)
+		val1 = step1->partition;
+	if (step2->partition)
+		val2 = step2->partition;
+	diff = strcmp(val1, val2);
 
 	if (reverse_order)
 		diff = -diff;
diff --git a/src/squeue/squeue.c b/src/squeue/squeue.c
index 0ab83b8f6..5bedfbaaa 100644
--- a/src/squeue/squeue.c
+++ b/src/squeue/squeue.c
@@ -63,13 +63,14 @@ int max_line_size;
  ************/
 static int  _get_window_width( void );
 static void _print_date( void );
-static void _print_job (void);
-static void _print_job_steps( void );
+static int _print_job (void);
+static int _print_job_steps( void );
 
 int 
 main (int argc, char *argv[]) 
 {
 	log_options_t opts = LOG_OPTS_STDERR_ONLY ;
+	int error_code = SLURM_SUCCESS;
 
 	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_USER, NULL);
 	parse_command_line( argc, argv );
@@ -86,9 +87,9 @@ main (int argc, char *argv[])
 			_print_date ();
 		
 		if ( params.step_flag )
-			_print_job_steps( );
-		else 
-			_print_job( );
+			error_code = _print_job_steps( );
+		else
+			error_code = _print_job( );
 		
 		if ( params.iterate ) {
 			printf( "\n");
@@ -98,7 +99,10 @@ main (int argc, char *argv[])
 			break;
 	}
 
-	exit (0);
+	if ( error_code != SLURM_SUCCESS )
+		exit (error_code);
+	else
+		exit (0);
 }
 
 /* get_window_width - return the size of the window STDOUT goes to */
@@ -126,7 +130,7 @@ _get_window_width( void )
 
 
 /* _print_job - print the specified job's information */
-static void 
+static int
 _print_job ( void ) 
 {
 	static job_info_msg_t * old_job_ptr = NULL, * new_job_ptr;
@@ -167,7 +171,7 @@ _print_job ( void )
 
 	if (error_code) {
 		slurm_perror ("slurm_load_jobs error");
-		return;
+		return SLURM_ERROR;
 	}
 	old_job_ptr = new_job_ptr;
 	if (job_id)
@@ -189,12 +193,12 @@ _print_job ( void )
 
 	print_jobs_array( new_job_ptr->job_array, new_job_ptr->record_count , 
 			params.format_list ) ;
-	return;
+	return SLURM_SUCCESS;
 }
 
 
 /* _print_job_step - print the specified job step's information */
-static void
+static int
 _print_job_steps( void )
 {
 	int error_code;
@@ -220,7 +224,7 @@ _print_job_steps( void )
 				&new_step_ptr, show_flags);
 	if (error_code) {
 		slurm_perror ("slurm_get_job_steps error");
-		return;
+		return SLURM_ERROR;
 	}
 	old_step_ptr = new_step_ptr;
 
@@ -236,7 +240,7 @@ _print_job_steps( void )
 	print_steps_array( new_step_ptr->job_steps, 
 			   new_step_ptr->job_step_count, 
 			   params.format_list );
-	return;
+	return SLURM_SUCCESS;
 }
 
 
diff --git a/src/squeue/squeue.h b/src/squeue/squeue.h
index 034880648..b3aef322f 100644
--- a/src/squeue/squeue.h
+++ b/src/squeue/squeue.h
@@ -86,6 +86,7 @@ struct squeue_parameters {
 	char* jobs;
 	hostset_t nodes;
 	char* partitions;
+	char* accounts;
 	char* states;
 	char* steps;
 	char* users;
@@ -94,6 +95,7 @@ struct squeue_parameters {
 
 	List  job_list;
 	List  part_list;
+	List  account_list;
 	List  state_list;
 	List  step_list;
 	List  user_list;
diff --git a/src/sreport/cluster_reports.c b/src/sreport/cluster_reports.c
index 64970d03b..54e9c0672 100644
--- a/src/sreport/cluster_reports.c
+++ b/src/sreport/cluster_reports.c
@@ -103,10 +103,7 @@ static int _set_wckey_cond(int *start, int argc, char *argv[],
 			}
 		}
 
-		if(!end && !strncasecmp(argv[i], "where",
-					MAX(command_len, 5))) {
-			continue;
-		} else if(!end && !strncasecmp(argv[i], "all_clusters", 
+		if(!end && !strncasecmp(argv[i], "all_clusters", 
 					       MAX(command_len, 1))) {
 			local_cluster_flag = 1;
 		} else if(!end && !strncasecmp(argv[i], "withdeleted",
@@ -115,8 +112,6 @@ static int _set_wckey_cond(int *start, int argc, char *argv[],
 			set = 1;
 		} else if(!end
 			  || !strncasecmp (argv[i], "WCKeys",
-					   MAX(command_len, 3))
-			  || !strncasecmp (argv[i], "Names",
 					   MAX(command_len, 3))) {
 			if(!wckey_cond->name_list)
 				wckey_cond->name_list =
@@ -212,13 +207,7 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 			}
 		}
 
-		if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
-			i--;
-			break;
-		} else if(!end && !strncasecmp(argv[i], "where", 
-					       MAX(command_len, 5))) {
-			continue;
-		} else if(!end && !strncasecmp(argv[i], "all_clusters", 
+		if(!end && !strncasecmp(argv[i], "all_clusters", 
 					       MAX(command_len, 1))) {
 			local_cluster_flag = 1;
 		} else if (!end && !strncasecmp (argv[i], "Tree",
@@ -319,19 +308,11 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 			}
 		}
 
-		if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
-			i--;
-			break;
-		} else if(!end && !strncasecmp(argv[i], "where",
-					       MAX(command_len, 5))) {
-			continue;
-		} else if(!end && !strncasecmp(argv[i], "all_clusters",
+		if(!end && !strncasecmp(argv[i], "all_clusters",
 					       MAX(command_len, 1))) {
 			local_cluster_flag = 1;
 		} else if(!end
 			  || !strncasecmp (argv[i], "Clusters",
-					   MAX(command_len, 1))
-			  || !strncasecmp (argv[i], "Names", 
 					   MAX(command_len, 1))) {
 			slurm_addto_char_list(cluster_cond->cluster_list,
 					      argv[i]+end);
diff --git a/src/sreport/job_reports.c b/src/sreport/job_reports.c
index 39a2e2e09..0a2aac6e7 100644
--- a/src/sreport/job_reports.c
+++ b/src/sreport/job_reports.c
@@ -75,12 +75,14 @@ enum {
 	PRINT_JOB_DUR,
 	PRINT_JOB_NODES,
 	PRINT_JOB_SIZE,
-	PRINT_JOB_USER
+	PRINT_JOB_USER,
+	PRINT_JOB_WCKEY
 };
 
 static List print_fields_list = NULL; /* types are of print_field_t */
 static List grouping_print_fields_list = NULL; /* types are of print_field_t */
 static int print_job_count = 0;
+static bool flat_view = false;
 
 static void _destroy_local_grouping(void *object)
 {
@@ -113,6 +115,58 @@ static void _destroy_cluster_grouping(void *object)
 	}
 }
 
+/* 
+ * Comparator used for sorting clusters alphabetically
+ * 
+ * returns: 1: cluster_a > cluster_b   
+ *           0: cluster_a == cluster_b
+ *           -1: cluster_a < cluster_b
+ * 
+ */
+extern int _sort_cluster_grouping_dec(cluster_grouping_t *cluster_a,
+				      cluster_grouping_t *cluster_b)
+{
+	int diff = 0;
+
+	if(!cluster_a->cluster || !cluster_b->cluster)
+		return 0;
+
+	diff = strcmp(cluster_a->cluster, cluster_b->cluster);
+
+	if (diff > 0)
+		return 1;
+	else if (diff < 0)
+		return -1;
+	
+	return 0;
+}
+
+/* 
+ * Comparator used for sorting clusters alphabetically
+ * 
+ * returns: 1: acct_a > acct_b   
+ *           0: acct_a == acct_b
+ *           -1: acct_a < acct_b
+ * 
+ */
+extern int _sort_acct_grouping_dec(acct_grouping_t *acct_a,
+				   acct_grouping_t *acct_b)
+{
+	int diff = 0;
+
+	if(!acct_a->acct || !acct_b->acct)
+		return 0;
+
+	diff = strcmp(acct_a->acct, acct_b->acct);
+
+	if (diff > 0)
+		return 1;
+	else if (diff < 0)
+		return -1;
+	
+	return 0;
+}
+
 /* returns number of objects added to list */
 extern int _addto_uid_char_list(List char_list, char *names)
 {
@@ -237,13 +291,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 		else
 			command_len=end-1;
 
-		if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
-			i--;
-			break;
-		} else if(!end && !strncasecmp(argv[i], "where", 
-					       MAX(command_len, 5))) {
-			continue;
-		} else if(!end && !strncasecmp(argv[i], "all_clusters",
+		if(!end && !strncasecmp(argv[i], "all_clusters",
 					       MAX(command_len, 1))) {
 			local_cluster_flag = 1;
 			continue;
@@ -251,6 +299,10 @@ static int _set_cond(int *start, int argc, char *argv[],
 					       MAX(command_len, 2))) {
 			print_job_count = 1;
 			continue;
+		} else if (!end && !strncasecmp (argv[i], "FlatView",
+					 MAX(command_len, 2))) {
+			flat_view = true;
+			continue;
 		} else if(!end 
 			  || !strncasecmp (argv[i], "Clusters",
 					   MAX(command_len, 1))) {
@@ -277,7 +329,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			job_cond->usage_end = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format",
-					 MAX(command_len, 1))) {
+					 MAX(command_len, 2))) {
 			if(format_list)
 				slurm_addto_char_list(format_list, argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Gid", MAX(command_len, 2))) {
@@ -345,6 +397,14 @@ static int _set_cond(int *start, int argc, char *argv[],
 			_addto_uid_char_list(job_cond->userid_list,
 					     argv[i]+end);
 			set = 1;
+		} else if (!strncasecmp (argv[i], "Wckeys", 
+					 MAX(command_len, 2))) {
+			if(!job_cond->wckey_list)
+				job_cond->wckey_list =
+					list_create(slurm_destroy_char);
+			slurm_addto_char_list(job_cond->wckey_list,
+					      argv[i]+end);
+			set = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
@@ -444,6 +504,12 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("User");
 			field->len = 9;
 			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Wckey", object,
+				       MAX(command_len, 1))) {
+			field->type = PRINT_JOB_WCKEY;
+			field->name = xstrdup("Wckey");
+			field->len = 9;
+			field->print_routine = print_fields_str;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown field '%s'\n", object);
@@ -610,20 +676,6 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 		goto end_it;
 	}
 
-	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
-	assoc_cond.id_list = job_cond->associd_list;
-	assoc_cond.cluster_list = job_cond->cluster_list;
-	assoc_cond.partition_list = job_cond->partition_list;
-	if(!job_cond->acct_list || !list_count(job_cond->acct_list)) {
-		job_cond->acct_list = list_create(NULL);
-		list_append(job_cond->acct_list, "root");
-	}
-	assoc_cond.parent_acct_list = job_cond->acct_list;	
-	
-
-	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
-						     &assoc_cond);
-	
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
@@ -651,6 +703,22 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 	cluster_itr = list_iterator_create(cluster_list);
 	group_itr = list_iterator_create(grouping_list);
 
+	if(flat_view)
+		goto no_assocs;
+
+	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
+	assoc_cond.id_list = job_cond->associd_list;
+	assoc_cond.cluster_list = job_cond->cluster_list;
+	/* don't limit associations to having the partition_list */
+	//assoc_cond.partition_list = job_cond->partition_list;
+	if(!job_cond->acct_list || !list_count(job_cond->acct_list)) {
+		job_cond->acct_list = list_create(NULL);
+		list_append(job_cond->acct_list, "root");
+	}
+	assoc_cond.parent_acct_list = job_cond->acct_list;	
+	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
+						     &assoc_cond);
+	
 	if(!assoc_list) {
 		debug2(" No assoc list given.\n");
 		goto no_assocs;
@@ -747,19 +815,21 @@ no_assocs:
 			/* here we are only looking for groups that
 			 * were added with the associations above
 			 */
-			continue;
-/* 			cluster_group =  */
-/* 				xmalloc(sizeof(cluster_grouping_t)); */
-/* 			cluster_group->cluster = xstrdup(local_cluster); */
-/* 			cluster_group->acct_list = */
-/* 				list_create(_destroy_acct_grouping); */
-/* 			list_append(cluster_list, cluster_group); */
+			if(!flat_view)
+				continue;
+			cluster_group =
+				xmalloc(sizeof(cluster_grouping_t));
+			cluster_group->cluster = xstrdup(local_cluster);
+			cluster_group->acct_list =
+				list_create(_destroy_acct_grouping);
+			list_append(cluster_list, cluster_group);
 		}
 
 		acct_itr = list_iterator_create(cluster_group->acct_list);
 		while((acct_group = list_next(acct_itr))) {
-			if(acct_group->lft != (uint32_t)NO_VAL
-			   && job->lft != (uint32_t)NO_VAL) {
+			if(!flat_view 
+			   && (acct_group->lft != (uint32_t)NO_VAL)
+			   && (job->lft != (uint32_t)NO_VAL)) {
 				/* keep separate since we don't want
 				 * to so a strcmp if we don't have to 
 				 */
@@ -772,34 +842,36 @@ no_assocs:
 		list_iterator_destroy(acct_itr);		
 			
 		if(!acct_group) {
-			//char *group = NULL;
-			//uint32_t last_size = 0;
+			char *group = NULL;
+			uint32_t last_size = 0;
 			/* here we are only looking for groups that
 			 * were added with the associations above
 			 */
-			continue;
-/* 			acct_group = xmalloc(sizeof(acct_grouping_t)); */
-/* 			acct_group->acct = xstrdup(local_account); */
-/* 			acct_group->groups = */
-/* 				list_create(_destroy_local_grouping); */
-/* 			list_append(cluster_group->acct_list, acct_group); */
-
-/* 			while((group = list_next(group_itr))) { */
-/* 				local_group = xmalloc(sizeof(local_grouping_t)); */
-/* 				local_group->jobs = list_create(NULL); */
-/* 				local_group->min_size = last_size; */
-/* 				last_size = atoi(group); */
-/* 				local_group->max_size = last_size-1; */
-/* 				list_append(acct_group->groups, local_group); */
-/* 			} */
-/* 			if(last_size) { */
-/* 				local_group = xmalloc(sizeof(local_grouping_t)); */
-/* 				local_group->jobs = list_create(NULL); */
-/* 				local_group->min_size = last_size; */
-/* 				local_group->max_size = INFINITE; */
-/* 				list_append(acct_group->groups, local_group); */
-/* 			} */
-/* 			list_iterator_reset(group_itr); */
+			if(!flat_view)
+				continue;
+
+			acct_group = xmalloc(sizeof(acct_grouping_t));
+			acct_group->acct = xstrdup(local_account);
+			acct_group->groups =
+				list_create(_destroy_local_grouping);
+			list_append(cluster_group->acct_list, acct_group);
+
+			while((group = list_next(group_itr))) {
+				local_group = xmalloc(sizeof(local_grouping_t));
+				local_group->jobs = list_create(NULL);
+				local_group->min_size = last_size;
+				last_size = atoi(group);
+				local_group->max_size = last_size-1;
+				list_append(acct_group->groups, local_group);
+			}
+			if(last_size) {
+				local_group = xmalloc(sizeof(local_grouping_t));
+				local_group->jobs = list_create(NULL);
+				local_group->min_size = last_size;
+				local_group->max_size = INFINITE;
+				list_append(acct_group->groups, local_group);
+			}
+			list_iterator_reset(group_itr);
 		}
 
 		local_itr = list_iterator_create(acct_group->groups);
@@ -826,8 +898,12 @@ no_assocs:
 	
 	itr = list_iterator_create(print_fields_list);
 	itr2 = list_iterator_create(grouping_print_fields_list);
+	list_sort(cluster_list, (ListCmpF)_sort_cluster_grouping_dec);
 	list_iterator_reset(cluster_itr);
 	while((cluster_group = list_next(cluster_itr))) {
+		
+		list_sort(cluster_group->acct_list,
+			  (ListCmpF)_sort_acct_grouping_dec);
 		acct_itr = list_iterator_create(cluster_group->acct_list);
 		while((acct_group = list_next(acct_itr))) {
 			
@@ -905,9 +981,344 @@ end_it:
 	
 	if(assoc_list) {
 		list_destroy(assoc_list);
+		assoc_list = NULL;
+	}
+	
+	if(cluster_list) {
+		list_destroy(cluster_list);
+		cluster_list = NULL;
+	}
+	
+	if(print_fields_list) {
+		list_destroy(print_fields_list);
+		print_fields_list = NULL;
+	}
+
+	if(grouping_print_fields_list) {
+		list_destroy(grouping_print_fields_list);
+		grouping_print_fields_list = NULL;
+	}
+
+	return rc;
+}
+
+extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
+{
+	int rc = SLURM_SUCCESS;
+	acct_job_cond_t *job_cond = xmalloc(sizeof(acct_job_cond_t));
+	acct_wckey_cond_t wckey_cond;
+	acct_wckey_rec_t *wckey = NULL;
+	int i=0;
+
+	ListIterator itr = NULL;
+	ListIterator itr2 = NULL;
+	ListIterator cluster_itr = NULL;
+	ListIterator local_itr = NULL;
+	ListIterator acct_itr = NULL;
+	ListIterator group_itr = NULL;	
+
+	jobacct_job_rec_t *job = NULL;
+	cluster_grouping_t *cluster_group = NULL;
+	acct_grouping_t *acct_group = NULL;
+	local_grouping_t *local_group = NULL;
+
+	print_field_t *field = NULL;
+	print_field_t total_field;
+	uint32_t total_time = 0;
+	sreport_time_format_t temp_format;
+	
+	List job_list = NULL;
+	List cluster_list = NULL;
+	List wckey_list = NULL;
+
+	List format_list = list_create(slurm_destroy_char);
+	List grouping_list = list_create(slurm_destroy_char);
+
+	List header_list = list_create(NULL);
+
+//	sreport_time_format_t temp_time_format = time_format;
+
+	print_fields_list = list_create(destroy_print_field);
+
+	_set_cond(&i, argc, argv, job_cond, format_list, grouping_list);
+	
+	if(!list_count(format_list))
+		slurm_addto_char_list(format_list, "Cl,wc");
+
+	if(!list_count(grouping_list)) 
+		slurm_addto_char_list(grouping_list, "50,250,500,1000");
+	
+	_setup_print_fields_list(format_list);
+	list_destroy(format_list);
+
+	_setup_grouping_print_fields_list(grouping_list);
+
+	/* we don't want to actually query by wckeys in the jobs
+	   here since we may be looking for sub accounts of a specific
+	   account.
+	*/
+	job_list = jobacct_storage_g_get_jobs_cond(db_conn, my_uid, job_cond);
+
+	if(!job_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem with job query.\n");
+		goto end_it;
+	}
+
+	memset(&wckey_cond, 0, sizeof(acct_wckey_cond_t));
+	wckey_cond.name_list = job_cond->wckey_list;
+	wckey_cond.cluster_list = job_cond->cluster_list;
+
+	wckey_list = acct_storage_g_get_wckeys(db_conn, my_uid, &wckey_cond);
+	
+	if(print_fields_have_header) {
+		char start_char[20];
+		char end_char[20];
+		time_t my_start = job_cond->usage_start;
+		time_t my_end = job_cond->usage_end-1;
+
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+		printf("Job Sizes by Wckey %s - %s (%d secs)\n", 
+		       start_char, end_char, 
+		       (job_cond->usage_end - job_cond->usage_start));
+		if(print_job_count)
+			printf("Units are in number of jobs ran\n");
+		else
+			printf("Time reported in %s\n", time_format_string);
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+	}
+	total_time = job_cond->usage_end - job_cond->usage_start;
+
+	cluster_list = list_create(_destroy_cluster_grouping);
+
+	cluster_itr = list_iterator_create(cluster_list);
+	group_itr = list_iterator_create(grouping_list);
+
+	if(!wckey_list) {
+		debug2(" No wckey list given.\n");
+		goto no_assocs;
+	}
+
+	itr = list_iterator_create(wckey_list);
+	while((wckey = list_next(itr))) {
+		while((cluster_group = list_next(cluster_itr))) {
+			if(!strcmp(wckey->cluster, cluster_group->cluster)) 
+				break;
+		}
+		if(!cluster_group) {
+			cluster_group = 
+				xmalloc(sizeof(cluster_grouping_t));
+			cluster_group->cluster = xstrdup(wckey->cluster);
+			cluster_group->acct_list =
+				list_create(_destroy_acct_grouping);
+			list_append(cluster_list, cluster_group);
+		}
+
+		acct_itr = list_iterator_create(cluster_group->acct_list);
+		while((acct_group = list_next(acct_itr))) {
+			if(!strcmp(wckey->name, acct_group->acct))
+				break;
+		}
+		list_iterator_destroy(acct_itr);		
+			
+		if(!acct_group) {
+			uint32_t last_size = 0;
+			char *group = NULL;
+			acct_group = xmalloc(sizeof(acct_grouping_t));
+			acct_group->acct = xstrdup(wckey->name);
+			acct_group->lft = wckey->id;
+			acct_group->groups =
+				list_create(_destroy_local_grouping);
+			list_append(cluster_group->acct_list, acct_group);
+			while((group = list_next(group_itr))) {
+				local_group = xmalloc(sizeof(local_grouping_t));
+				local_group->jobs = list_create(NULL);
+				local_group->min_size = last_size;
+				last_size = atoi(group);
+				local_group->max_size = last_size-1;
+				list_append(acct_group->groups, local_group);
+			}
+			if(last_size) {
+				local_group = xmalloc(sizeof(local_grouping_t));
+				local_group->jobs = list_create(NULL);
+				local_group->min_size = last_size;
+				local_group->max_size = INFINITE;
+				list_append(acct_group->groups, local_group);
+			}
+			list_iterator_reset(group_itr);
+		}
+		list_iterator_reset(cluster_itr);
+	}
+	list_iterator_destroy(itr);
+no_assocs:
+	itr = list_iterator_create(job_list);
+
+	list_append_list(header_list, print_fields_list);
+	list_append_list(header_list, grouping_print_fields_list);
+
+	memset(&total_field, 0, sizeof(print_field_t));
+	total_field.type = PRINT_JOB_SIZE;
+	total_field.name = xstrdup("% of cluster");
+	total_field.len = 12;
+	total_field.print_routine = sreport_print_time;
+	list_append(header_list, &total_field);
+
+	print_fields_header(header_list);
+	list_destroy(header_list);
+
+	while((job = list_next(itr))) {
+		char *local_cluster = "UNKNOWN";
+		char *local_account = "UNKNOWN";
+
+		if(!job->elapsed) {
+			/* here we don't care about jobs that didn't
+			 * really run here */
+			continue;
+		}
+		if(job->cluster) 
+			local_cluster = job->cluster;
+		if(job->account) 
+			local_account = job->account;
+
+		list_iterator_reset(cluster_itr);
+		while((cluster_group = list_next(cluster_itr))) {
+			if(!strcmp(local_cluster, cluster_group->cluster)) 
+				break;
+		}
+		if(!cluster_group) {
+			/* here we are only looking for groups that
+			 * were added with the associations above
+			 */
+			continue;
+		}
+
+		acct_itr = list_iterator_create(cluster_group->acct_list);
+		while((acct_group = list_next(acct_itr))) {
+			if(!strcmp(job->wckey, acct_group->acct))
+				break;
+		}
+		list_iterator_destroy(acct_itr);		
+			
+		if(!acct_group) {
+			/* here we are only looking for groups that
+			 * were added with the associations above
+			 */
+			continue;
+		}
+
+		local_itr = list_iterator_create(acct_group->groups);
+		while((local_group = list_next(local_itr))) {
+			uint64_t total_secs = 0;
+			if((job->alloc_cpus < local_group->min_size)
+			   || (job->alloc_cpus > local_group->max_size))
+				continue;
+			list_append(local_group->jobs, job);
+			local_group->count++;
+			total_secs = (uint64_t)job->elapsed 
+				* (uint64_t)job->alloc_cpus;
+			local_group->cpu_secs += total_secs;
+			acct_group->cpu_secs += total_secs;
+			cluster_group->cpu_secs += total_secs;
+		}
+		list_iterator_destroy(local_itr);		
+	}
+	list_iterator_destroy(group_itr);
+	list_destroy(grouping_list);
+	list_iterator_destroy(itr);
+	
+//	time_format = SREPORT_TIME_PERCENT;
+	
+	itr = list_iterator_create(print_fields_list);
+	itr2 = list_iterator_create(grouping_print_fields_list);
+	list_sort(cluster_list, (ListCmpF)_sort_cluster_grouping_dec);
+	list_iterator_reset(cluster_itr);
+	while((cluster_group = list_next(cluster_itr))) {
+		list_sort(cluster_group->acct_list, 
+			  (ListCmpF)_sort_acct_grouping_dec);
+		acct_itr = list_iterator_create(cluster_group->acct_list);
+		while((acct_group = list_next(acct_itr))) {
+			
+			while((field = list_next(itr))) {
+				switch(field->type) {
+				case PRINT_JOB_CLUSTER:
+					field->print_routine(
+						field,
+						cluster_group->cluster, 0);
+					break;
+				case PRINT_JOB_WCKEY:
+					field->print_routine(field,
+							     acct_group->acct,
+							     0);
+					break;
+				default:
+					field->print_routine(field,
+							     NULL,
+							     0);
+					break;
+				}
+			}
+			list_iterator_reset(itr);
+			local_itr = list_iterator_create(acct_group->groups);
+			while((local_group = list_next(local_itr))) {
+				field = list_next(itr2);
+				switch(field->type) {
+				case PRINT_JOB_SIZE:
+					field->print_routine(
+						field,
+						local_group->cpu_secs,
+						acct_group->cpu_secs,
+						0);
+					break;
+				case PRINT_JOB_COUNT:
+					field->print_routine(
+						field,
+						local_group->count,
+						0);
+					break;
+				default:
+					field->print_routine(field,
+							     NULL,
+							     0);
+					break;
+				}
+			}
+			list_iterator_reset(itr2);
+			list_iterator_destroy(local_itr);
+			
+			temp_format = time_format;
+			time_format = SREPORT_TIME_PERCENT;
+			total_field.print_routine(&total_field,
+						  acct_group->cpu_secs,
+						  cluster_group->cpu_secs, 1);
+			time_format = temp_format;
+			printf("\n");
+		}
+		list_iterator_destroy(acct_itr);
+	}
+	list_iterator_destroy(itr);
+
+//	time_format = temp_time_format;
+
+end_it:
+	if(print_job_count)
+		print_job_count = 0;
+
+	destroy_acct_job_cond(job_cond);
+	
+	if(job_list) {
+		list_destroy(job_list);
 		job_list = NULL;
 	}
 	
+	if(wckey_list) {
+		list_destroy(wckey_list);
+		wckey_list = NULL;
+	}
+	
 	if(cluster_list) {
 		list_destroy(cluster_list);
 		cluster_list = NULL;
diff --git a/src/sreport/job_reports.h b/src/sreport/job_reports.h
index f4641967c..81bd19235 100644
--- a/src/sreport/job_reports.h
+++ b/src/sreport/job_reports.h
@@ -43,5 +43,6 @@
 #include "sreport.h"
 
 extern int job_sizes_grouped_by_top_acct(int argc, char *argv[]);
+extern int job_sizes_grouped_by_wckey(int argc, char *argv[]);
 
 #endif
diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c
index 4136d4127..858be26d4 100644
--- a/src/sreport/sreport.c
+++ b/src/sreport/sreport.c
@@ -75,7 +75,7 @@ main (int argc, char *argv[])
 	int error_code = SLURM_SUCCESS, i, opt_char, input_field_count;
 	char **input_fields;
 	log_options_t opts = LOG_OPTS_STDERR_ONLY ;
-
+	char *temp = NULL;
 	int option_index;
 	static struct option long_options[] = {
 		{"all_clusters", 0, 0, 'a'},
@@ -99,6 +99,20 @@ main (int argc, char *argv[])
 	quiet_flag        = 0;
 	log_init("sreport", opts, SYSLOG_FACILITY_DAEMON, NULL);
 
+	/* Check to see if we are running a supported accounting plugin */
+	temp = slurm_get_accounting_storage_type();
+	if(strcasecmp(temp, "accounting_storage/slurmdbd")
+	   && strcasecmp(temp, "accounting_storage/mysql")) {
+		fprintf (stderr, "You are not running a supported "
+			 "accounting_storage plugin\n(%s).\n"
+			 "Only 'accounting_storage/slurmdbd' "
+			 "and 'accounting_storage/mysql' are supported.\n",
+			temp);
+		xfree(temp);
+		exit(1);
+	}
+	xfree(temp);
+
 	while((opt_char = getopt_long(argc, argv, "ahnpPqs:t:vV",
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
@@ -211,16 +225,22 @@ getline(const char *prompt)
 static void _job_rep (int argc, char *argv[]) 
 {
 	int error_code = SLURM_SUCCESS;
+	int command_len = strlen(argv[0]);
 
-	/* First identify the entity to add */
-	if (strncasecmp (argv[0], "Sizes", 1) == 0) {
+	/* For backwards compatibility we just look at the 1st char
+	 * by default since Sizes was the original name */
+	if (!strncasecmp (argv[0], "SizesByAccount", MAX(command_len, 1))) {
 		error_code = job_sizes_grouped_by_top_acct(
 			(argc - 1), &argv[1]);
+	} else if (!strncasecmp (argv[0], 
+				 "SizesByWcKey", MAX(command_len, 8))) {
+		error_code = job_sizes_grouped_by_wckey(
+			(argc - 1), &argv[1]);
 	} else {
 		exit_code = 1;
 		fprintf(stderr, "Not valid report %s\n", argv[0]);
 		fprintf(stderr, "Valid job reports are, ");
-		fprintf(stderr, "\"Sizes\"\n");
+		fprintf(stderr, "\"SizesByAccount, and SizesByWckey\"\n");
 	}
 	
 	if (error_code) {
@@ -519,13 +539,13 @@ static int _set_time_format(char *format)
 
 	if (strncasecmp (format, "SecPer", MAX(command_len, 6)) == 0) {
 		time_format = SREPORT_TIME_SECS_PER;
-		time_format_string = "Seconds/Percentange of Total";
+		time_format_string = "Seconds/Percentage of Total";
 	} else if (strncasecmp (format, "MinPer", MAX(command_len, 6)) == 0) {
 		time_format = SREPORT_TIME_MINS_PER;
-		time_format_string = "Minutes/Percentange of Total";
+		time_format_string = "Minutes/Percentage of Total";
 	} else if (strncasecmp (format, "HourPer", MAX(command_len, 6)) == 0) {
 		time_format = SREPORT_TIME_HOURS_PER;
-		time_format_string = "Hours/Percentange of Total";
+		time_format_string = "Hours/Percentage of Total";
 	} else if (strncasecmp (format, "Seconds", MAX(command_len, 1)) == 0) {
 		time_format = SREPORT_TIME_SECS;
 		time_format_string = "Seconds";
@@ -537,7 +557,7 @@ static int _set_time_format(char *format)
 		time_format_string = "Hours";
 	} else if (strncasecmp (format, "Percent", MAX(command_len, 1)) == 0) {
 		time_format = SREPORT_TIME_PERCENT;
-		time_format_string = "Percentange of Total";
+		time_format_string = "Percentage of Total";
 	} else {
 		fprintf (stderr, "unknown time format %s", format);	
 		return SLURM_ERROR;
@@ -602,7 +622,7 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
   <REPORT> is different for each report type.                              \n\
      cluster - AccountUtilizationByUser, UserUtilizationByAccount,         \n\
                UserUtilizationByWckey, Utilization, WCKeyUtilizationByUser \n\
-     job     - Sizes                                                       \n\
+     job     - SizesByAccount, SizesByWckey                                \n\
      user    - TopUsage                                                    \n\
                                                                            \n\
   <OPTIONS> are different for each report type.                            \n\
@@ -610,6 +630,8 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
      COMMON FOR ALL TYPES                                                  \n\
              - All_Clusters     - Use all monitored clusters default is    \n\
                                   local cluster.                           \n\
+             - Clusters=<OPT>   - List of clusters to include in report    \n\
+                                  Default is local cluster.                \n\
              - End=<OPT>        - Period ending for report.                \n\
                                   Default is 23:59:59 of previous day.     \n\
              - Format=<OPT>     - Comma separated list of fields to display\n\
@@ -617,17 +639,35 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
              - Start=<OPT>      - Period start for report.                 \n\
                                   Default is 00:00:00 of previous day.     \n\
                                                                            \n\
-     cluster - Names=<OPT>      - List of clusters to include in report    \n\
-                                  Default is local cluster.                \n\
+     cluster - Accounts=<OPT>   - When used with the UserUtilizationByAccount,\n\
+                                  or AccountUtilizationByUser, List of accounts\n\
+                                  to include in report.  Default is all.   \n\
              - Tree             - When used with the AccountUtilizationByUser\n\
                                   report will span the accounts as they    \n\
                                   in the hierarchy.                        \n\
+             - Users=<OPT>      - When used with any report other than     \n\
+                                  Utilization, List of users to include in \n\
+                                  report.  Default is all.                 \n\
+             - Wckeys=<OPT>     - When used with the UserUtilizationByWckey\n\
+                                  or WCKeyUtilizationByUser, List of wckeys\n\
+                                  to include in report.  Default is all.   \n\
                                                                            \n\
      job     - Accounts=<OPT>   - List of accounts to use for the report   \n\
-                                  Default is all.                          \n\
-             - Clusters=<OPT>   - List of clusters to include in report.   \n\
-                                  Default is local cluster.                \n\
-             - GID=<OPT>        - List of group ids to include in report   \n\
+                                  Default is all.  The SizesByAccount      \n\
+                                  report only displays 1 hierarchical level.\n\
+                                  If accounts are specified the next layer \n\
+                                  of accounts under those specified will be\n\
+                                  displayed, not the accounts specified.   \n\
+                                  In the SizesByAccount reports the default\n\
+                                  for accounts is root.  This explanation  \n\
+                                  does not apply when run with the FlatView\n\
+                                  option.                                  \n\
+             - FlatView         - When used with the SizesByAccount        \n\
+                                  will not group accounts in a             \n\
+                                  hierarchical level, but print each       \n\
+                                  account where jobs ran on a separate     \n\
+                                  line without any hierarchy.              \n\
+             - GID=<OPT>        - List of group ids to include in report.  \n\
                                   Default is all.                          \n\
              - Grouping=<OPT>   - Comma separated list of size groupings.  \n\
                                   (i.e. 50,100,150 would group job cpu count\n\
@@ -636,15 +676,19 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                   Default is all.                          \n\
              - Partitions=<OPT> - List of partitions jobs ran on to include\n\
                                   in report.  Default is all.              \n\
-             - PrintJobCount    - When used with the Sizes report will print\n\
-                                  number of jobs ran instead of time used. \n\
+             - PrintJobCount    - When used with any Sizes report          \n\
+                                  will print number of jobs ran instead of \n\
+                                  time used.                               \n\
              - Users=<OPT>      - List of users jobs to include in report. \n\
                                   Default is all.                          \n\
+             - Wckeys=<OPT>     - List of wckeys to use for the report.    \n\
+                                  Default is all.  The SizesByWckey        \n\
+                                  report has all users summed together.  If\n\
+                                  you want only certain users specify      \n\
+                                  them with the Users= option.             \n\
                                                                            \n\
      user    - Accounts=<OPT>   - List of accounts to use for the report   \n\
                                   Default is all.                          \n\
-             - Clusters=<OPT>   - List of clusters to include in report.   \n\
-                                  Default is local cluster.                \n\
              - Group            - Group all accounts together for each user.\n\
                                   Default is a separate entry for each user\n\
                                   and account reference.                   \n\
@@ -654,6 +698,35 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                   Default is all.                          \n\
                                                                            \n\
                                                                            \n\
+  Below are the format options for each report.                            \n\
+                                                                           \n\
+       Cluster                                                             \n\
+       - AccountUtilizationByUser                                          \n\
+       - UserUtilizationByAccount                                          \n\
+             - Accounts, Cluster, CPUCount, Login, Proper, Used            \n\
+       - UserUtilizationByWckey                                            \n\
+       - WCKeyUtilizationByUser                                            \n\
+             - Cluster, CPUCount, Login, Proper, Used, Wckey               \n\
+       - Utilization                                                       \n\
+             - Allocated, Cluster, CPUCount, Down, Idle, Overcommited,     \n\
+               Reported, Reserved                                          \n\
+                                                                           \n\
+       Job                                                                 \n\
+       - Sizes                                                             \n\
+             - Account, Cluster                                            \n\
+                                                                           \n\
+       User                                                                \n\
+       - TopUsage                                                          \n\
+             - Account, Cluster, Login, Proper, Used                       \n\
+                                                                           \n\
+                                                                           \n\
+                                                                           \n\
+  Note, valid start/end time formats are...                                \n\
+       HH:MM[:SS] [AM|PM]                                                  \n\
+       MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]                                \n\
+       MM/DD[/YY]-HH:MM[:SS]                                               \n\
+                                                                           \n\
+                                                                           \n\
   All commands and options are case-insensitive.                         \n\n");
 	
 }
diff --git a/src/sreport/user_reports.c b/src/sreport/user_reports.c
index 784cc63fa..09537f117 100644
--- a/src/sreport/user_reports.c
+++ b/src/sreport/user_reports.c
@@ -75,7 +75,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 		user_cond->assoc_cond->with_usage = 1;
 	}
 	assoc_cond = user_cond->assoc_cond;
-
+	
 	if(!assoc_cond->cluster_list)
 		assoc_cond->cluster_list = list_create(slurm_destroy_char);
 	for (i=(*start); i<argc; i++) {
@@ -85,13 +85,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 		else
 			command_len=end-1;
 
-		if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
-			i--;
-			break;
-		} else if(!end && !strncasecmp(argv[i], "where",
-					       MAX(command_len, 5))) {
-			continue;
-		} else if(!end && !strncasecmp(argv[i], "all_clusters", 
+		if(!end && !strncasecmp(argv[i], "all_clusters", 
 					       MAX(command_len, 1))) {
 			local_cluster_flag = 1;
 			continue;
@@ -270,6 +264,8 @@ extern int user_top(int argc, char *argv[])
 
 	_set_cond(&i, argc, argv, user_cond, format_list);
 
+	user_cond->assoc_cond->without_parent_info = 1;
+
 	if(!list_count(format_list)) 
 		slurm_addto_char_list(format_list, "Cl,L,P,A,U");
 
diff --git a/src/srun/opt.c b/src/srun/opt.c
index 312c13d99..9a0f04460 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -748,6 +748,7 @@ env_vars_t env_vars[] = {
 {"SLURM_KILL_BAD_EXIT", OPT_INT,        &opt.kill_bad_exit, NULL             },
 {"SLURM_LABELIO",       OPT_INT,        &opt.labelio,       NULL             },
 {"SLURM_LINUX_IMAGE",   OPT_STRING,     &opt.linuximage,    NULL             },
+{"SLURM_CNLOAD_IMAGE",  OPT_STRING,     &opt.linuximage,    NULL             },
 {"SLURM_MLOADER_IMAGE", OPT_STRING,     &opt.mloaderimage,  NULL             },
 {"SLURM_NNODES",        OPT_NODES,      NULL,               NULL             },
 {"SLURM_NSOCKETS_PER_NODE",OPT_NSOCKETS,NULL,               NULL             },
@@ -758,6 +759,7 @@ env_vars_t env_vars[] = {
 {"SLURM_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL             },
 {"SLURM_PARTITION",     OPT_STRING,     &opt.partition,     NULL             },
 {"SLURM_RAMDISK_IMAGE", OPT_STRING,     &opt.ramdiskimage,  NULL             },
+{"SLURM_IOLOAD_IMAGE",  OPT_STRING,     &opt.ramdiskimage,  NULL             },
 {"SLURM_REMOTE_CWD",    OPT_STRING,     &opt.cwd,           NULL             },
 {"SLURM_STDERRMODE",    OPT_STRING,     &opt.efname,        NULL             },
 {"SLURM_STDINMODE",     OPT_STRING,     &opt.ifname,        NULL             },
@@ -1034,8 +1036,10 @@ static void set_options(const int argc, char **argv)
 		{"tasks-per-node",   required_argument, 0, LONG_OPT_NTASKSPERNODE},
 		{"blrts-image",      required_argument, 0, LONG_OPT_BLRTS_IMAGE},
 		{"linux-image",      required_argument, 0, LONG_OPT_LINUX_IMAGE},
+		{"cnload-image",     required_argument, 0, LONG_OPT_LINUX_IMAGE},
 		{"mloader-image",    required_argument, 0, LONG_OPT_MLOADER_IMAGE},
 		{"ramdisk-image",    required_argument, 0, LONG_OPT_RAMDISK_IMAGE},
+		{"ioload-image",     required_argument, 0, LONG_OPT_RAMDISK_IMAGE},
 		{"reboot",           no_argument,       0, LONG_OPT_REBOOT},            
 		{"get-user-env",     optional_argument, 0, LONG_OPT_GET_USER_ENV},
 		{"pty",              no_argument,       0, LONG_OPT_PTY},
@@ -2152,14 +2156,24 @@ static void _opt_list()
 	info("reboot         : %s", opt.reboot ? "no" : "yes");
 	info("rotate         : %s", opt.no_rotate ? "yes" : "no");
 	
+#ifdef HAVE_BGL
 	if (opt.blrtsimage)
 		info("BlrtsImage     : %s", opt.blrtsimage);
+#endif
 	if (opt.linuximage)
+#ifdef HAVE_BGL
 		info("LinuxImage     : %s", opt.linuximage);
+#else
+		info("CnloadImage    : %s", opt.linuximage);
+#endif
 	if (opt.mloaderimage)
 		info("MloaderImage   : %s", opt.mloaderimage);
 	if (opt.ramdiskimage)
+#ifdef HAVE_BGL
 		info("RamDiskImage   : %s", opt.ramdiskimage);
+#else
+		info("IoloadImage    : %s", opt.ramdiskimage);
+#endif
 
 	info("network        : %s", opt.network);
 	info("propagate      : %s",
@@ -2217,14 +2231,19 @@ static void _usage(void)
 "            [--ntasks-per-core=n] [--mem-per-cpu=MB]\n"
 #ifdef HAVE_BG		/* Blue gene specific options */
 "            [--geometry=XxYxZ] [--conn-type=type] [--no-rotate] [--reboot]\n"
+#ifdef HAVE_BGL
 "            [--blrts-image=path] [--linux-image=path]\n"
 "            [--mloader-image=path] [--ramdisk-image=path]\n"
+#else
+"            [--cnload-image=path]\n"
+"            [--mloader-image=path] [--ioload-image=path]\n"
 #endif
-		"            [--mail-type=type] [--mail-user=user] [--nice[=value]]\n"
-		"            [--prolog=fname] [--epilog=fname]\n"
-		"            [--task-prolog=fname] [--task-epilog=fname]\n"
-		"            [--ctrl-comm-ifhn=addr] [--multi-prog]\n"
-		"            [-w hosts...] [-x hosts...] executable [args...]\n");
+#endif
+"            [--mail-type=type] [--mail-user=user] [--nice[=value]]\n"
+"            [--prolog=fname] [--epilog=fname]\n"
+"            [--task-prolog=fname] [--task-epilog=fname]\n"
+"            [--ctrl-comm-ifhn=addr] [--multi-prog]\n"
+"            [-w hosts...] [-x hosts...] executable [args...]\n");
 }
 
 static void _help(void)
@@ -2358,12 +2377,22 @@ static void _help(void)
 		"      --reboot                reboot block before starting job\n"
 		"      --conn-type=type        constraint on type of connection, MESH or TORUS\n"
 		"                              if not set, then tries to fit TORUS else MESH\n"
-		"      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
-		"      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
-		"      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
-		"      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
-		"\n"
+#ifndef HAVE_BGL
+		"                              If wanting to run in HTC mode (only for 1\n"
+		"                              midplane and below).  You can use HTC_S for\n"
+		"                              SMP, HTC_D for Dual, HTC_V for\n"
+		"                              virtual node mode, and HTC_L for Linux mode.\n" 
+                "      --cnload-image=path     path to compute node image for bluegene block.  Default if not set\n"
+                "      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
+                "      --ioload-image=path     path to ioload image for bluegene block.  Default if not set\n"
+#else
+                "      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
+                "      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
+                "      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
+                "      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
 #endif
+#endif
+		"\n"
 		"Help options:\n"
 		"      --help                  show this help message\n"
 		"      --usage                 display brief usage message\n"
diff --git a/src/srun/srun.c b/src/srun/srun.c
index 836b7eba5..1f0fc7f1b 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -406,6 +406,7 @@ int srun(int ac, char **av)
 	if (slurm_step_launch(job->step_ctx, slurmctld_comm_addr.hostname, 
 	    &launch_params, &callbacks) != SLURM_SUCCESS) {
 		error("Application launch failed: %m");
+		global_rc = 1;
 		goto cleanup;
 	}
 
diff --git a/src/sstat/options.c b/src/sstat/options.c
index c1208fbb6..531679409 100644
--- a/src/sstat/options.c
+++ b/src/sstat/options.c
@@ -66,6 +66,7 @@ void _help_msg(void)
 	printf("\n"
 	       "By default, sstat displays status data for job/step stated\n"
 	       "Options:\n"
+	       "-a, --allsteps\n"
 	       "-C, --cluster\n"
 	       "    Job is running on this cluster.\n"
 	       "-F <field-list>, --fields=<field-list>\n"
@@ -265,6 +266,7 @@ void parse_command_line(int argc, char **argv)
 	log_options_t logopt = LOG_OPTS_STDERR_ONLY;
 
 	static struct option long_options[] = {
+		{"allsteps", 0, 0, 'a'},
 		{"cluster", 1, 0, 'C'},
 		{"fields", 1, 0, 'F'},
 		{"help", 0, &params.opt_help, 1},
@@ -283,11 +285,14 @@ void parse_command_line(int argc, char **argv)
 	opterr = 1;		/* Let getopt report problems to the user */
 
 	while (1) {		/* now cycle through the command line */
-		c = getopt_long(argc, argv, "F:hj:Vv",
+		c = getopt_long(argc, argv, "aF:hj:Vv",
 				long_options, &optionIndex);
 		if (c == -1)
 			break;
 		switch (c) {
+		case 'a':
+			params.opt_all_steps = 1;
+			break;
 		case 'F':
 			if(params.opt_field_list)
 				xfree(params.opt_field_list);
diff --git a/src/sstat/sstat.c b/src/sstat/sstat.c
index ac6963617..38b76ce09 100644
--- a/src/sstat/sstat.c
+++ b/src/sstat/sstat.c
@@ -244,7 +244,23 @@ int main(int argc, char **argv)
 	while((selected_step = list_next(itr))) {
 		if(selected_step->stepid != NO_VAL)
 			stepid = selected_step->stepid;
-		else
+		else if(params.opt_all_steps) {
+			job_step_info_response_msg_t *step_ptr = NULL;
+			int i = 0;
+			if(slurm_get_job_steps(
+				   0, selected_step->jobid, 0, 
+				   &step_ptr, SHOW_ALL)) {
+				error("couldn't get steps for job %u",
+				      selected_step->jobid);
+				continue;
+			}
+			for (i = 0; i < step_ptr->job_step_count; i++) {
+				_do_stat(selected_step->jobid, 
+					 step_ptr->job_steps[i].step_id);
+			}
+			slurm_free_job_step_info_response_msg(step_ptr);
+			continue;
+		} else 
 			stepid = 0;
 		_do_stat(selected_step->jobid, stepid);
 	}
diff --git a/src/sstat/sstat.h b/src/sstat/sstat.h
index 2e1f81524..21559444b 100644
--- a/src/sstat/sstat.h
+++ b/src/sstat/sstat.h
@@ -85,6 +85,7 @@ typedef enum {	HEADLINE,
 } type_t;
 
 typedef struct {
+	int opt_all_steps;	/* --allsteps */
 	char *opt_field_list;	/* --fields= */
 	int opt_help;		/* --help */
 	List opt_job_list;	/* --jobs */
diff --git a/src/sview/block_info.c b/src/sview/block_info.c
index 5d6bb3125..1ae5873e8 100644
--- a/src/sview/block_info.c
+++ b/src/sview/block_info.c
@@ -73,7 +73,9 @@ enum {
 	SORTID_RAMDISKIMAGE,
 	SORTID_STATE,
 	SORTID_UPDATED, 
+#ifdef HAVE_BGL
 	SORTID_USE,
+#endif
 	SORTID_USER,
 	SORTID_CNT
 };
@@ -195,6 +197,7 @@ static int _in_slurm_partition(int *part_inx, int *bp_inx)
 
 static char* _convert_conn_type(enum connection_type conn_type)
 {
+#ifdef HAVE_BG
 	switch (conn_type) {
 	case (SELECT_MESH):
 		return "MESH";
@@ -204,7 +207,24 @@ static char* _convert_conn_type(enum connection_type conn_type)
 		return "SMALL";
 	case (SELECT_NAV):
 		return "NAV";
+#ifndef HAVE_BGL
+	case SELECT_HTC_S:
+		return "HTC_S";
+		break;
+	case SELECT_HTC_D:
+		return "HTC_D";
+		break;
+	case SELECT_HTC_V:
+		return "HTC_V";
+		break;
+	case SELECT_HTC_L:
+		return "HTC_L";
+		break;
+#endif
+	default:
+		return "?";
 	}
+#endif
 	return "?";
 }
 
diff --git a/src/sview/common.c b/src/sview/common.c
index aecce533c..65bba1648 100644
--- a/src/sview/common.c
+++ b/src/sview/common.c
@@ -64,8 +64,7 @@ static int _sort_iter_compare_func_char(GtkTreeModel *model,
 	gtk_tree_model_get(model, a, sortcol, &name1, -1);
 	gtk_tree_model_get(model, b, sortcol, &name2, -1);
 	
-	if (name1 == NULL || name2 == NULL)
-	{
+	if (name1 == NULL || name2 == NULL) {
 		if (name1 == NULL && name2 == NULL)
 			goto cleanup; /* both equal => ret = 0 */
 		
@@ -120,6 +119,52 @@ static int _sort_iter_compare_func_int(GtkTreeModel *model,
 	return ret;
 }
 
+static int _sort_iter_compare_func_nodes(GtkTreeModel *model,
+					 GtkTreeIter  *a,
+					 GtkTreeIter  *b,
+					 gpointer      userdata)
+{
+	int sortcol = GPOINTER_TO_INT(userdata);
+	int ret = 0;
+	gchar *name1 = NULL, *name2 = NULL;
+	
+	gtk_tree_model_get(model, a, sortcol, &name1, -1);
+	gtk_tree_model_get(model, b, sortcol, &name2, -1);
+	
+	if (name1 == NULL || name2 == NULL) {
+		if (name1 == NULL && name2 == NULL)
+			goto cleanup; /* both equal => ret = 0 */
+		
+		ret = (name1 == NULL) ? -1 : 1;
+	} else {
+		uint64_t int1 = atoi(name1);
+		uint64_t int2 = atoi(name2);
+		if(strchr(name1, 'K')) {
+			int1 *= 1024;
+		} else if(strchr(name1, 'M')) {
+			int1 *= 1048576;
+		} else if(strchr(name1, 'G')) {
+			int1 *= 1073741824;
+		}
+
+		if(strchr(name2, 'K')) {
+			int2 *= 1024;
+		} else if(strchr(name2, 'M')) {
+			int2 *= 1048576;
+		} else if(strchr(name2, 'G')) {
+			int2 *= 1073741824;
+		}
+
+		if (int1 != int2)
+			ret = (int1 > int2) ? 1 : -1;		
+	}
+cleanup:
+	g_free(name1);
+	g_free(name2);
+	
+	return ret;
+}
+
 static void _editing_started(GtkCellRenderer *cell,
 			     GtkCellEditable *editable,
 			     const gchar     *path,
@@ -546,7 +591,7 @@ extern GtkTreeStore *create_treestore(GtkTreeView *tree_view,
 	
 	treestore = gtk_tree_store_newv(count, types);
 	if(!treestore) {
-		g_error("Can't great treestore.\n");
+		g_error("Can't create treestore.\n");
 		return NULL;
 	}
 	
@@ -567,13 +612,23 @@ extern GtkTreeStore *create_treestore(GtkTreeView *tree_view,
 			
 			break;
 		case G_TYPE_STRING:
-			gtk_tree_sortable_set_sort_func(
-				GTK_TREE_SORTABLE(treestore), 
-				display_data[i].id, 
-				_sort_iter_compare_func_char,
-				GINT_TO_POINTER(display_data[i].id), 
-				NULL); 
-			break;
+			if(!strcasecmp(display_data[i].name, "Nodes")) {
+				gtk_tree_sortable_set_sort_func(
+					GTK_TREE_SORTABLE(treestore), 
+					display_data[i].id, 
+					_sort_iter_compare_func_nodes,
+					GINT_TO_POINTER(display_data[i].id), 
+					NULL); 
+				break;
+			} else {
+				gtk_tree_sortable_set_sort_func(
+					GTK_TREE_SORTABLE(treestore), 
+					display_data[i].id, 
+					_sort_iter_compare_func_char,
+					GINT_TO_POINTER(display_data[i].id), 
+					NULL); 
+				break;
+			}
 		default:
 			g_print("unknown type %d",
 				(int)display_data[i].type);
diff --git a/testsuite/expect/test1.59 b/testsuite/expect/test1.59
index a6f7256e5..6cb16f1c9 100755
--- a/testsuite/expect/test1.59
+++ b/testsuite/expect/test1.59
@@ -97,7 +97,6 @@ expect {
 		send_user "\nFAILURE: srun not responding\n"
 		kill_srun
 		set exit_code 1
-		exp_continue
 	}
 	eof {
 		wait
diff --git a/testsuite/expect/test12.2 b/testsuite/expect/test12.2
index 6d50f4fba..6c5efad94 100755
--- a/testsuite/expect/test12.2
+++ b/testsuite/expect/test12.2
@@ -269,12 +269,13 @@ if {$matches < 4} {
 set elapsed_time 0
 spawn $sacct --noheader  --job=$job_id.0 --fields elapsed
 expect {
-	-re "($number):(\[0-9\])(\[0-9\])" {
-		set mins $expect_out(1,string)
-		set sec_ten $expect_out(2,string)
-		set sec_one $expect_out(3,string)
+	-re "($number):($number):(\[0-9\])(\[0-9\])" {
+		set hours $expect_out(1,string)
+		set mins $expect_out(2,string)
+		set sec_ten $expect_out(3,string)
+		set sec_one $expect_out(4,string)
 		set secs [expr $sec_ten * 10 + $sec_one]
-		set elapsed_time [expr $mins * 60 + $secs]
+		set elapsed_time [expr ($hours * 3600) + ($mins * 60) + $secs]
 		exp_continue
 	}
 	timeout {
diff --git a/testsuite/expect/test19.5 b/testsuite/expect/test19.5
index c64181b24..d7f302ef8 100755
--- a/testsuite/expect/test19.5
+++ b/testsuite/expect/test19.5
@@ -67,7 +67,7 @@ exec $strigger --clear --quiet --user=$uid
 #
 # Build input script file and submit a job
 #
-make_bash_script $file_in "sleep 60"
+make_bash_script $file_in "$srun sleep 60"
 
 set job_id 0
 spawn $sbatch --output=/dev/null -t2 $file_in
-- 
GitLab