diff --git a/AUTHORS b/AUTHORS
index f12487efb9583473d7c5e99f93742dc94969e357..570f3dc3fa80c839384459260948aa3b9306bd18 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -10,6 +10,7 @@ Gilles Civario <gilles.civario(at)bull.net>
 Chris Dunlap <cdunlap(at)llnl.gov>
 Joey Ekstrom <ekstrom1(at)llnl.gov>
 Jim Garlick <garlick(at)llnl.gov>
+Didier Gazen <gazdi(at)aero.obs-mip.fr>
 Mark Grondona <mgrondona(at)llnl.gov>
 Takao Hatazaki <takao.hatazaki(at)hp.com>
 Matthieu Hautreux <matthieu.hautreux(at)cea.fr>
@@ -30,6 +31,7 @@ Gennaro Oliva <oliva.g(at)na.icar.cnr.it>
 Daniel Palermo <dan.palermo(at)hp.com>
 Dan Phung <phung4(at)llnl.gov>
 Ashley Pitman <ashley(at)quadrics.com>
+Vijay Ramasubramanian <vram0(at)umd.edu>
 Andy Riebs <Andy.Riebs(at)hp.com>
 Asier Roa <asier.roa(at)bsc.es>
 Miguel Ros <miguel.ros(at)bsc.es>
diff --git a/BUILD.NOTES b/BUILD.NOTES
index b8eff4b6315fc011b79d908c897984aa1d9b5226..bc0dd780148ee66689f2e69f65738a790b4f99d1 100644
--- a/BUILD.NOTES
+++ b/BUILD.NOTES
@@ -107,6 +107,7 @@ To build and run on AIX:
 	%_slurm_sysconfdir      %{_prefix}/etc/slurm
         %_defaultdocdir         %{_prefix}/doc
 	%_with_debug            1
+	%_with_aix		1
 	%with_ssl               "--with-ssl=/opt/freeware"
 	%with_munge             "--with-munge=/opt/freeware"
 	%with_proctrack         "--with-proctrack=/admin/llnl/include"
diff --git a/META b/META
index 21ee366df29d9bf63634a01bb6dd397419d92fdd..41e3463a9daac527f86d4f9eba7c42ee29f96ea3 100644
--- a/META
+++ b/META
@@ -3,9 +3,9 @@
   Api_revision:  0
   Major:         1
   Meta:          1
-  Micro:         3
+  Micro:         4
   Minor:         3
   Name:          slurm
   Release:       1
-  Release_tags:  
-  Version:       1.3.3
+  Release_tags:  dist
+  Version:       1.3.4
diff --git a/NEWS b/NEWS
index 83bf170afe4cb20a09f357648c0496885a10763d..bf08c977106b5138c7395efe5a4c0b5b490027ad 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,58 @@
 This file describes changes in recent versions of SLURM. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in SLURM 1.3.4
+========================
+ -- Some updates to man page formatting from Gennaro Oliva, ICAR.
+ -- Smarter loading of plugins (doesn't stat every file in the plugin dir)
+ -- In sched/backfill avoid trying to schedule jobs on DOWN or DRAINED nodes.
+ -- Forward exit_code from step completion to slurmdbd.
+ -- Add retry logic to socket connect() call from client which can fail 
+    when the slurmctld is under heavy load.
+ -- Fixed bug so that associations are added correctly.
+ -- Added support for associations for user root.
+ -- For Moab, the sbatch --get-user-env option is processed by the slurmd
+    daemon rather than by the sbatch command itself to permit faster
+    response to Moab.
+ -- IMPORTANT FIX: This only affects use of select/cons_res when allocating
+    resources by core or socket, not by CPU (default for SelectTypeParameter).
+    We were not saving a pending job's task distribution, so after restarting
+    slurmctld, select/cons_res was over-allocating resources based upon an 
+    invalid task distribution value. Since we can't save the value without 
+    changing the state save file format, we'll just set it to the default 
+    value for now and save it in Slurm v1.4. This may result in a slight 
+    variation on how sockets and cores are allocated to jobs, but at least 
+    resources will not be over-allocated.
+ -- Correct logic in accumulating resources by node weight when more than 
+    one job can run per node (select/cons_res or partition shared=yes|force).
+ -- slurm.spec file updated to avoid creating empty RPMs. RPM now *must* be
+    built with correct specification of which packages to build or not build.
+    See the top of the slurm.spec file for information about how to control
+    package building specification.
+ -- Set SLURM_JOB_CPUS_PER_NODE for jobs allocated using the srun command.
+    It was already set for salloc and sbatch commands.
+ -- Fix to handle suspended jobs that were cancelled in accounting
+ -- BLUEGENE - fix to only include bps given in a name from the bluegene.conf 
+    file.
+ -- For select/cons_res: Fix record-keeping for core allocations when more 
+    than one partition uses a node or there is more than one socket per node.
+ -- In output for "scontrol show job" change "StartTime" header to "EligibleTime"
+    for pending jobs to accurately describe what is reported.
+ -- Add more slurmdbd.conf parameters: ArchiveScript, ArchiveAge, JobPurge, and
+    StepPurge (not fully implemented yet).
+ -- Add slurm.conf parameter EnforcePartLimits to reject jobs which exceed a
+    partition's size and/or time limits rather than leaving them queued for a
+    later change in the partition's limits. NOTE: Not reported by
+    "scontrol show config" to avoid changing RPCs. It will be reported in 
+    SLURM version 1.4.
+ -- Added idea of coordinator to accounting.  A coordinator can add
+    associations for existing users to the account or to any sub-account
+    they coordinate.  They can also add or remove other coordinators for
+    those accounts.
+ -- Add support for Hostname and NodeHostname in slurm.conf being fully 
+    qualified domain names (by Vijay Ramasubramanian, University of Maryland). 
+    For more information see "man slurm.conf".
+
 * Changes in SLURM 1.3.3
 ========================
  -- Add mpi_openmpi plugin to the main SLURM RPM.
@@ -304,8 +356,30 @@ documents those changes that are of interest to users and admins.
     Moved existing digital signature logic into new plugin: crypto/openssl.
     Added new support for crypto/munge (available with GPL license).
 
+* Changes in SLURM 1.2.32
+=========================
+ -- Disable scancel of job in RootOnly partition only for sched/wiki2 (Moab).
+
 * Changes in SLURM 1.2.31
 =========================
+ -- For Moab only: If GetEnvTimeout=0 in slurm.conf then do not run "su" to get
+    the user's environment, only use the cache file.
+ -- For sched/wiki2 (Moab), treat the lack of a wiki.conf file or the lack 
+    of a configured AuthKey as a fatal error (lacks effective security).
+ -- For sched/wiki and sched/wiki2 (Maui or Moab) report a node's state as 
+    Busy rather than Running when allocated if SelectType=select/linear. Moab
+    was trying to schedule jobs on nodes that were already allocated to jobs
+    hidden from it via the HidePartitionJobs parameter in Slurm's wiki.conf.
+ -- In select/cons_res improve the resource selection when a job has specified
+    a processor count along with a maximum node count.
+ -- For an srun command with --ntasks-per-node option and *no* --ntasks count,
+    spawn a task count equal to the number of nodes selected multiplied by the 
+    --ntasks-per-node value.
+ -- In jobcomp/script: Set TZ if set in slurmctld's environment.
+ -- In srun with --verbose option properly format CPU allocation information 
+    logged for clusters with 1000+ nodes and 10+ CPUs per node.
+ -- Process a job's --mail_type=end option on any type of job termination, not
+    just normal completion (e.g. all failure modes too).
 
 * Changes in SLURM 1.2.30
 =========================
@@ -3225,4 +3299,4 @@ documents those changes that are of interest to users and admins.
  -- Change directory to /tmp in slurmd if daemonizing.
  -- Logfiles are reopened on reconfigure.
  
-$Id: NEWS 14153 2008-05-29 16:55:52Z jette $
+$Id: NEWS 14322 2008-06-23 22:01:33Z da $
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index 858d9ebfdf30dcbb8d39d762ddf6f7eab2c7438a..e07726a06075fc88acc3c0833bb575d3201a4c9a 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -181,6 +181,11 @@ ACCOUNTING CHANGES
   accounting reports are currently under development and will be released 
   soon.
 
+* A new command, sreport, is available for generating accounting reports.
+  While the sacct command can be used to generate information about 
+  individual jobs, sreport can combine this data to report utilization 
+  information by cluster, bank account, user, etc. 
+
 * Job completion records can now be written to a MySQL or PostGreSQL
   database in addition to a test file as controlled using the JobCompType
   parameter.
diff --git a/auxdir/x_ac_cflags.m4 b/auxdir/x_ac_cflags.m4
new file mode 100644
index 0000000000000000000000000000000000000000..09ed0b23dd6bed1c97ed6106fef7188ae441f7b1
--- /dev/null
+++ b/auxdir/x_ac_cflags.m4
@@ -0,0 +1,24 @@
+##*****************************************************************************
+## $Id: x_ac_cflags.m4 5401 2005-09-22 01:56:49Z morrone $
+##*****************************************************************************
+#  AUTHOR:
+#    Danny Auble  <da@llnl.gov>
+#
+#  SYNOPSIS:
+#    X_AC_CFLAGS
+#
+#  DESCRIPTION:
+#    Add extra cflags 
+##*****************************************************************************
+
+
+AC_DEFUN([X_AC_CFLAGS],
+[
+	# This is here to avoid a bug in the gcc 3.4.6 compiler.
+	# Without this flag there is a bug when pointing to other functions
+	# and then using them.  Setting this flag is also advised if there
+	# are goto statements, as it may give better performance.
+	if test "$GCC" = "yes"; then
+		CFLAGS="$CFLAGS -fno-gcse"
+	fi
+])
diff --git a/auxdir/x_ac_databases.m4 b/auxdir/x_ac_databases.m4
index 056eb840a33562a238c359cd52d0cec5fb6c4c61..8bed9a7039708d3745bf5262ea3dcb484b166ea3 100644
--- a/auxdir/x_ac_databases.m4
+++ b/auxdir/x_ac_databases.m4
@@ -15,8 +15,20 @@ AC_DEFUN([X_AC_DATABASES],
 [
 	#Check for MySQL
 	ac_have_mysql="no"
+	_x_ac_mysql_bin="no"
 	### Check for mysql_config program
-    	AC_PATH_PROG(HAVEMYSQLCONFIG, mysql_config, no)
+	AC_ARG_WITH(
+		[mysql_config],
+		AS_HELP_STRING(--with-mysql_config=PATH, 
+			Specify path to mysql_config binary),
+		[_x_ac_mysql_bin="$withval"])
+	
+	if test x$_x_ac_mysql_bin = xno; then
+    		AC_PATH_PROG(HAVEMYSQLCONFIG, mysql_config, no)
+	else
+   		AC_PATH_PROG(HAVEMYSQLCONFIG, mysql_config, no, $_x_ac_mysql_bin)
+	fi
+
 	if test x$HAVEMYSQLCONFIG = xno; then
         	AC_MSG_WARN([*** mysql_config not found. Evidently no MySQL install on system.])
 	else
@@ -71,8 +83,20 @@ AC_DEFUN([X_AC_DATABASES],
 
 	#Check for PostgreSQL
 	ac_have_postgres="no"
+	_x_ac_pgsql_bin="no"
 	### Check for pg_config program
-    	AC_PATH_PROG(HAVEPGCONFIG, pg_config, no)
+ 	AC_ARG_WITH(
+		[pg_config],
+		AS_HELP_STRING(--with-pg_config=PATH, 
+			Specify path to pg_config binary),
+		[_x_ac_pgsql_bin="$withval"])
+
+	if test x$_x_ac_pgsql_bin = xno; then
+    		AC_PATH_PROG(HAVEPGCONFIG, pg_config, no)
+	else
+  		AC_PATH_PROG(HAVEPGCONFIG, pg_config, no, $_x_ac_pgsql_bin)
+	fi
+  
 	if test x$HAVEPGCONFIG = xno; then
         	AC_MSG_WARN([*** pg_config not found. Evidently no PostgreSQL install on system.])
 	else
diff --git a/auxdir/x_ac_gtk.m4 b/auxdir/x_ac_gtk.m4
index cdd4d779885ecf03a7f8e1e473a3411e20b596cd..ecdf3cf47504c54fe7a24737577a19d1718d1322 100644
--- a/auxdir/x_ac_gtk.m4
+++ b/auxdir/x_ac_gtk.m4
@@ -16,12 +16,24 @@ AC_DEFUN([X_AC_GTK],
 [
 ### Set to "no" if any test fails
     ac_have_gtk="yes"
+    _x_ac_pkcfg_bin="no"
 
 ### Check for pkg-config program
-    AC_PATH_PROG(HAVEPKGCONFIG, pkg-config, no)
+    AC_ARG_WITH(
+	    [pkg-config],
+	    AS_HELP_STRING(--with-pkg-config=PATH, 
+		    Specify path to pkg-config binary),
+	    [_x_ac_pkcfg_bin="$withval"])
+    
+    if test x$_x_ac_pkcfg_bin = xno; then
+    	    AC_PATH_PROG(HAVEPKGCONFIG, pkg-config, no)
+    else
+   	    AC_PATH_PROG(HAVEPKGCONFIG, pkg-config, no, $_x_ac_pkcfg_bin)
+    fi
+    
     if test x$HAVEPKGCONFIG = xno; then
-        AC_MSG_WARN([*** pkg-config not found. Cannot probe for libglade-2.0 or gtk+-2.0.])
-        ac_have_gtk="no"
+            AC_MSG_WARN([*** pkg-config not found. Cannot probe for libglade-2.0 or gtk+-2.0.])
+            ac_have_gtk="no"
     fi
 
 ### Check for libglade package (We don't need this right now so don't add it)
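The three new --with options added in the m4 changes above share one pattern: when a path is supplied, it is handed to AC_PATH_PROG as the directory in which to search for the named program. A minimal usage sketch (the directories shown are hypothetical, not taken from this patch):

    ./configure --with-mysql_config=/usr/local/mysql/bin \
                --with-pg_config=/usr/local/pgsql/bin \
                --with-pkg-config=/opt/gtk2/bin

Note that, as implemented, each value is treated as a search directory rather than as the full path of the binary itself.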
diff --git a/configure b/configure
index e0ef76114189c9580c0afc18dbbb56f3558a5ba7..4da1885653546ef99a7b915c1f0d871deeff3a47 100755
--- a/configure
+++ b/configure
@@ -1594,6 +1594,10 @@ Optional Packages:
   --with-db2-dir=PATH     Specify path to DB2 library's parent directory
   --with-bg-serial=NAME   set BG_SERIAL value BGL
   --with-xcpu=PATH        specify path to XCPU directory
+  --with-pkg-config=PATH  Specify path to pkg-config binary
+  --with-mysql_config=PATH
+                          Specify path to mysql_config binary
+  --with-pg_config=PATH   Specify path to pg_config binary
   --with-slurmctld-port=N set slurmctld default port 6817
   --with-slurmd-port=N    set slurmd default port 6818
   --with-slurmdbd-port=N  set slurmdbd default port 6819
@@ -7115,7 +7119,7 @@ ia64-*-hpux*)
   ;;
 *-*-irix6*)
   # Find out which ABI we are using.
-  echo '#line 7118 "configure"' > conftest.$ac_ext
+  echo '#line 7122 "configure"' > conftest.$ac_ext
   if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
   (eval $ac_compile) 2>&5
   ac_status=$?
@@ -9221,11 +9225,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9224: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9228: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:9228: \$? = $ac_status" >&5
+   echo "$as_me:9232: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -9511,11 +9515,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9514: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9518: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:9518: \$? = $ac_status" >&5
+   echo "$as_me:9522: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -9615,11 +9619,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9618: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9622: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:9622: \$? = $ac_status" >&5
+   echo "$as_me:9626: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -11992,7 +11996,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 11995 "configure"
+#line 11999 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -12092,7 +12096,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 12095 "configure"
+#line 12099 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -14493,11 +14497,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:14496: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:14500: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:14500: \$? = $ac_status" >&5
+   echo "$as_me:14504: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -14597,11 +14601,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:14600: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:14604: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:14604: \$? = $ac_status" >&5
+   echo "$as_me:14608: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -16195,11 +16199,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:16198: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:16202: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:16202: \$? = $ac_status" >&5
+   echo "$as_me:16206: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -16299,11 +16303,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:16302: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:16306: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:16306: \$? = $ac_status" >&5
+   echo "$as_me:16310: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -18519,11 +18523,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18522: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:18526: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:18526: \$? = $ac_status" >&5
+   echo "$as_me:18530: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -18809,11 +18813,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18812: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:18816: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:18816: \$? = $ac_status" >&5
+   echo "$as_me:18820: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -18913,11 +18917,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18916: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:18920: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:18920: \$? = $ac_status" >&5
+   echo "$as_me:18924: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -25155,9 +25159,18 @@ fi
 
 ### Set to "no" if any test fails
     ac_have_gtk="yes"
+    _x_ac_pkcfg_bin="no"
 
 ### Check for pkg-config program
-    # Extract the first word of "pkg-config", so it can be a program name with args.
+
+# Check whether --with-pkg-config was given.
+if test "${with_pkg_config+set}" = set; then
+  withval=$with_pkg_config; _x_ac_pkcfg_bin="$withval"
+fi
+
+
+    if test x$_x_ac_pkcfg_bin = xno; then
+    	    # Extract the first word of "pkg-config", so it can be a program name with args.
 set dummy pkg-config; ac_word=$2
 { echo "$as_me:$LINENO: checking for $ac_word" >&5
 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
@@ -25198,10 +25211,54 @@ echo "${ECHO_T}no" >&6; }
 fi
 
 
+    else
+   	    # Extract the first word of "pkg-config", so it can be a program name with args.
+set dummy pkg-config; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_path_HAVEPKGCONFIG+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  case $HAVEPKGCONFIG in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path_HAVEPKGCONFIG="$HAVEPKGCONFIG" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $_x_ac_pkcfg_bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_path_HAVEPKGCONFIG="$as_dir/$ac_word$ac_exec_ext"
+    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_path_HAVEPKGCONFIG" && ac_cv_path_HAVEPKGCONFIG="no"
+  ;;
+esac
+fi
+HAVEPKGCONFIG=$ac_cv_path_HAVEPKGCONFIG
+if test -n "$HAVEPKGCONFIG"; then
+  { echo "$as_me:$LINENO: result: $HAVEPKGCONFIG" >&5
+echo "${ECHO_T}$HAVEPKGCONFIG" >&6; }
+else
+  { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+    fi
+
     if test x$HAVEPKGCONFIG = xno; then
-        { echo "$as_me:$LINENO: WARNING: *** pkg-config not found. Cannot probe for libglade-2.0 or gtk+-2.0." >&5
+            { echo "$as_me:$LINENO: WARNING: *** pkg-config not found. Cannot probe for libglade-2.0 or gtk+-2.0." >&5
 echo "$as_me: WARNING: *** pkg-config not found. Cannot probe for libglade-2.0 or gtk+-2.0." >&2;}
-        ac_have_gtk="no"
+            ac_have_gtk="no"
     fi
 
 ### Check for libglade package (We don't need this right now so don't add it)
@@ -25327,8 +25384,17 @@ fi
 
 	#Check for MySQL
 	ac_have_mysql="no"
+	_x_ac_mysql_bin="no"
 	### Check for mysql_config program
-    	# Extract the first word of "mysql_config", so it can be a program name with args.
+
+# Check whether --with-mysql_config was given.
+if test "${with_mysql_config+set}" = set; then
+  withval=$with_mysql_config; _x_ac_mysql_bin="$withval"
+fi
+
+
+	if test x$_x_ac_mysql_bin = xno; then
+    		# Extract the first word of "mysql_config", so it can be a program name with args.
 set dummy mysql_config; ac_word=$2
 { echo "$as_me:$LINENO: checking for $ac_word" >&5
 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
@@ -25369,6 +25435,50 @@ echo "${ECHO_T}no" >&6; }
 fi
 
 
+	else
+   		# Extract the first word of "mysql_config", so it can be a program name with args.
+set dummy mysql_config; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_path_HAVEMYSQLCONFIG+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  case $HAVEMYSQLCONFIG in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path_HAVEMYSQLCONFIG="$HAVEMYSQLCONFIG" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $_x_ac_mysql_bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_path_HAVEMYSQLCONFIG="$as_dir/$ac_word$ac_exec_ext"
+    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_path_HAVEMYSQLCONFIG" && ac_cv_path_HAVEMYSQLCONFIG="no"
+  ;;
+esac
+fi
+HAVEMYSQLCONFIG=$ac_cv_path_HAVEMYSQLCONFIG
+if test -n "$HAVEMYSQLCONFIG"; then
+  { echo "$as_me:$LINENO: result: $HAVEMYSQLCONFIG" >&5
+echo "${ECHO_T}$HAVEMYSQLCONFIG" >&6; }
+else
+  { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+	fi
+
 	if test x$HAVEMYSQLCONFIG = xno; then
         	{ echo "$as_me:$LINENO: WARNING: *** mysql_config not found. Evidently no MySQL install on system." >&5
 echo "$as_me: WARNING: *** mysql_config not found. Evidently no MySQL install on system." >&2;}
@@ -25471,8 +25581,17 @@ echo "$as_me: WARNING: *** MySQL test program execution failed." >&2;}
 
 	#Check for PostgreSQL
 	ac_have_postgres="no"
+	_x_ac_pgsql_bin="no"
 	### Check for pg_config program
-    	# Extract the first word of "pg_config", so it can be a program name with args.
+
+# Check whether --with-pg_config was given.
+if test "${with_pg_config+set}" = set; then
+  withval=$with_pg_config; _x_ac_pgsql_bin="$withval"
+fi
+
+
+	if test x$_x_ac_pgsql_bin = xno; then
+    		# Extract the first word of "pg_config", so it can be a program name with args.
 set dummy pg_config; ac_word=$2
 { echo "$as_me:$LINENO: checking for $ac_word" >&5
 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
@@ -25513,6 +25632,50 @@ echo "${ECHO_T}no" >&6; }
 fi
 
 
+	else
+  		# Extract the first word of "pg_config", so it can be a program name with args.
+set dummy pg_config; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_path_HAVEPGCONFIG+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  case $HAVEPGCONFIG in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path_HAVEPGCONFIG="$HAVEPGCONFIG" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $_x_ac_pgsql_bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_path_HAVEPGCONFIG="$as_dir/$ac_word$ac_exec_ext"
+    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_path_HAVEPGCONFIG" && ac_cv_path_HAVEPGCONFIG="no"
+  ;;
+esac
+fi
+HAVEPGCONFIG=$ac_cv_path_HAVEPGCONFIG
+if test -n "$HAVEPGCONFIG"; then
+  { echo "$as_me:$LINENO: result: $HAVEPGCONFIG" >&5
+echo "${ECHO_T}$HAVEPGCONFIG" >&6; }
+else
+  { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+	fi
+
 	if test x$HAVEPGCONFIG = xno; then
         	{ echo "$as_me:$LINENO: WARNING: *** pg_config not found. Evidently no PostgreSQL install on system." >&5
 echo "$as_me: WARNING: *** pg_config not found. Evidently no PostgreSQL install on system." >&2;}
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index 7239c2fa32ca441fb813803cbb98a0a79989fb43..2b61e0922a9f68addf9f1bd8fc92f14701a3b418 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -148,6 +148,7 @@ function displayfile()
    "#CheckpointType=checkpoint/none <br>" +
    "CryptoType=crypto/" + get_radio_value(document.config.crypto_type) + "<br>" +
    "#DisableRootJobs=NO <br>" +
+   "#EnforcePartLimits=NO <br>" +
    get_field("Epilog",document.config.epilog) + "<br>" +
    "#FirstJobId=1 <br>" +
    get_field("JobCredentialPrivateKey", document.config.private_key) + "<br>" +
diff --git a/doc/html/documentation.shtml b/doc/html/documentation.shtml
index de8b51111092b9b2449362efaf09b6cd49e20186..9136821b4e622bf61cd971049984c3d1f82b7b62 100644
--- a/doc/html/documentation.shtml
+++ b/doc/html/documentation.shtml
@@ -7,7 +7,6 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 <h2>SLURM Users</h2>
 <ul>
 <li><a href="quickstart.shtml">Quick Start User Guide</a></li>
-<li><a href="https://computing.llnl.gov/LCdocs/slurm/">SLURM Reference Manual</a></li>
 <li><a href="mc_support.shtml">Support for Multi-core/Multi-threaded Architectures</a></li>
 <li><a href="quickstart.shtml#mpi">Guide to MPI Use</a></li>
 <li><a href="bluegene.shtml">Blue Gene User and Administrator Guide</a></li>
@@ -52,6 +51,6 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 <li><a href="taskplugins.shtml">Task Plugin Programmer Guide</a></li>
 </ul>
 
-<p style="text-align:center;">Last modified 28 April 2008</p>
+<p style="text-align:center;">Last modified 3 June 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/publications.shtml b/doc/html/publications.shtml
index 75f6da57281554f68b620a656f308f6d08a5c3c6..2dde255da3a6b334e1f00449e1bc333771271a1f 100644
--- a/doc/html/publications.shtml
+++ b/doc/html/publications.shtml
@@ -2,6 +2,8 @@
 
 <h1>Publications and Presentations</h1>
 
+<p>Note that some of these contain dated information.</p>
+
 <h2>Presentations</h2>
 
 <ul>
@@ -32,6 +34,6 @@ volume 2862 of <i>Lecture Notes in Computer Science</i>,
 pages 44-60,
 Springer-Verlag, 2003.</p>
 
-<p style="text-align:center;">Last modified 28 April 2008</p>
+<p style="text-align:center;">Last modified 3 June 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/quickstart.shtml b/doc/html/quickstart.shtml
index b9357f81d1a33cab694179f3848000870e097682..7eac62a42a4221c6a27f2233c9e0699290f39023 100644
--- a/doc/html/quickstart.shtml
+++ b/doc/html/quickstart.shtml
@@ -17,7 +17,7 @@ it arbitrates contention for resources by managing a queue of pending work.</p>
 <p>As depicted in Figure 1, SLURM consists of a <b>slurmd</b> daemon running on 
 each compute node and a central <b>slurmctld</b> daemon running on a management node 
 (with optional fail-over twin). 
-The <b>slurmd</b> daemons provide fault-tolerant hierarchical communciations.
+The <b>slurmd</b> daemons provide fault-tolerant hierarchical communications.
 The user commands include: <b>sacct</b>, <b>salloc</b>, <b>sattach</b>,
 <b>sbatch</b>, <b>sbcast</b>, <b>scancel</b>, <b>scontrol</b>,  
 <b>sinfo</b>, <b>smap</b>, <b>squeue</b>, <b>srun</b>, <b>strigger</b> 
@@ -121,28 +121,145 @@ get and update state information for jobs, partitions, and nodes managed by SLUR
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Examples</h2>
-<p>Execute <span class="commandline">/bin/hostname</span> on four nodes (<span class="commandline">-N4</span>). 
-Include task numbers on the output (<span class="commandline">-l</span>). The 
-default partition will be used. One task per node will be used by default. </p>
+<p>First we determine what partitions exist on the system, what nodes 
+they include, and general system state. This information is provided 
+by the <span class="commandline">sinfo</span> command. 
+In the example below we find there are two partitions: <i>debug</i> 
+and <i>batch</i>.
+The <i>*</i> following the name <i>debug</i> indicates this is the 
+default partition for submitted jobs. 
+We see that both partitions are in an <i>UP</i> state. 
+Some configurations may include partitions for larger jobs
+that are <i>DOWN</i> except on weekends or at night. The information 
+about each partition may be split over more than one line so that 
+nodes in different states can be identified. 
+In this case, the two nodes <i>adev[1-2]</i> are <i>down</i>. 
+The <i>*</i> following the state <i>down</i> indicates the nodes are
+not responding. Note the use of a concise expression for node
+name specification with a common prefix <i>adev</i> and numeric
+ranges or specific numbers identified. This format allows very
+large clusters to be easily managed.
+The <span class="commandline">sinfo</span> command
+has many options to easily let you view the information of interest
+to you in whatever format you prefer.
+See the man page for more information.</p>
 <pre>
-adev0: srun -N4 -l /bin/hostname
-0: adev9
-1: adev10
-2: adev11
-3: adev12
-</pre> <p>Execute <span class="commandline">/bin/hostname</span> in four 
-tasks (<span class="commandline">-n4</span>). Include task numbers on the output 
-(<span class="commandline">-l</span>). The default partition will be used. One 
-processor per task will be used by default (note that we don't specify a node 
-count).</p>
+adev0: sinfo
+PARTITION AVAIL  TIMELIMIT NODES  STATE NODELIST
+debug*       up      30:00     2  down* adev[1-2]
+debug*       up      30:00     3   idle adev[3-5]
+batch        up      30:00     3  down* adev[6,13,15]
+batch        up      30:00     3  alloc adev[7-8,14]
+batch        up      30:00     4   idle adev[9-12]
+</pre>
+
+<p>Next we determine what jobs exist on the system using the 
+<span class="commandline">squeue</span> command. The
+<i>ST</i> field is job state.
+Two jobs are in a running state (<i>R</i> is an abbreviation 
+for <i>Running</i>) while one job is in a pending state
+(<i>PD</i> is an abbreviation for <i>Pending</i>).
+The <i>TIME</i> field shows how long the jobs have run,
+using the format <i>days-hours:minutes:seconds</i>.
+The <i>NODELIST(REASON)</i> field indicates where the
+job is running or the reason it is still pending. Typical
+reasons for pending jobs are <i>Resources</i> (waiting
+for resources to become available) and <i>Priority</i>
+(queued behind a higher priority job).
+The <span class="commandline">squeue</span> command
+has many options to easily let you view the information of interest
+to you in whatever format you prefer. 
+See the man page for more information.</p>
+<pre>
+adev0: squeue
+JOBID PARTITION  NAME  USER ST  TIME NODES NODELIST(REASON)
+65646     batch  chem  mike  R 24:19     2 adev[7-8]
+65647     batch   bio  joan  R  0:09     1 adev14
+65648     batch  math  phil PD  0:00     6 (Resources)
+</pre>
+
+<p>The <span class="commandline">scontrol</span> command
+can be used to report more detailed information about 
+nodes, partitions, jobs, job steps, and configuration.
+It can also be used by system administrators to make
+configuration changes. A couple of examples are shown 
+below. See the man page for more information.</p>
+<pre>
+adev0: scontrol show partition
+PartitionName=debug TotalNodes=5 TotalCPUs=40 RootOnly=NO
+   Default=YES Shared=FORCE:4 Priority=1 State=UP 
+   MaxTime=00:30:00 Hidden=NO
+   MinNodes=1 MaxNodes=26 DisableRootJobs=NO AllowGroups=ALL
+   Nodes=adev[1-5] NodeIndices=0-4
+
+PartitionName=batch TotalNodes=10 TotalCPUs=80 RootOnly=NO
+   Default=NO Shared=FORCE:4 Priority=1 State=UP
+   MaxTime=16:00:00 Hidden=NO
+   MinNodes=1 MaxNodes=26 DisableRootJobs=NO AllowGroups=ALL
+   Nodes=adev[6-15] NodeIndices=5-14
+
+
+adev0: scontrol show node adev1
+NodeName=adev1 State=DOWN* CPUs=8 AllocCPUs=0
+   RealMemory=4000 TmpDisk=0
+   Sockets=2 Cores=4 Threads=1 Weight=1 Features=intel 
+   Reason=Not responding [slurm@06/02-14:01:24]
+
+adev0: scontrol show job
+JobId=65672 UserId=phil(5136) GroupId=phil(5136)
+   Name=math
+   Priority=4294901603 Partition=batch BatchFlag=1
+   AllocNode:Sid=adev0:16726 TimeLimit=00:10:00 ExitCode=0:0
+   StartTime=06/02-15:27:11 EndTime=06/02-15:37:11
+   JobState=PENDING NodeList=(null) NodeListIndices=
+   ReqProcs=24 ReqNodes=1 ReqS:C:T=1-65535:1-65535:1-65535
+   Shared=1 Contiguous=0 CPUs/task=0 Licenses=(null)
+   MinProcs=1 MinSockets=1 MinCores=1 MinThreads=1
+   MinMemory=0 MinTmpDisk=0 Features=(null)
+   Dependency=(null) Account=(null) Requeue=1
+   Reason=None Network=(null)
+   ReqNodeList=(null) ReqNodeListIndices=
+   ExcNodeList=(null) ExcNodeListIndices=
+   SubmitTime=06/02-15:27:11 SuspendTime=None PreSusTime=0
+   Command=/home/phil/math
+   WorkDir=/home/phil
+</pre>
+
+<p>It is possible to create a resource allocation and launch
+the tasks for a job step in a single command line using the 
+<span class="commandline">srun</span> command. Depending 
+upon the MPI implementation used, MPI jobs may also be 
+launched in this manner. 
+See the <a href="#mpi">MPI</a> section for more MPI-specific information.
+In this example we execute <span class="commandline">/bin/hostname</span> 
+on three nodes (<i>-N3</i>) and include task numbers on the output (<i>-l</i>). 
+The default partition will be used. 
+One task per node will be used by default.
+Note that the <span class="commandline">srun</span> command has
+many options available to control what resources are allocated
+and how tasks are distributed across those resources.</p>
+<pre>
+adev0: srun -N3 -l /bin/hostname
+0: adev3
+1: adev4
+2: adev5
+</pre>
+
+<p>This variation on the previous example executes 
+<span class="commandline">/bin/hostname</span> in four tasks (<i>-n4</i>). 
+One processor per task will be used by default (note that we don't specify 
+a node count).</p>
 <pre>
 adev0: srun -n4 -l /bin/hostname
-0: adev9
-1: adev9
-2: adev10
-3: adev10
-</pre> <p>Submit the script my.script for later execution.
-Explicitly use the nodes adev9 and adev10 ("-w "adev[9-10]", note the use of a 
+0: adev3
+1: adev3
+2: adev3
+3: adev3
+</pre>
+
+<p>One common mode of operation is to submit a script for later execution.
+In this example the script name is <i>my.script</i> and we explicitly use 
+the nodes adev9 and adev10 (<i>-w "adev[9-10]"</i>, note the use of a 
 node range expression). 
 We also explicitly state that the subsequent job steps will spawn four tasks 
 each, which will insure that our allocation contains at least four processors
@@ -181,29 +299,43 @@ adev9
 3: /home/jette
 </pre>
 
-<p>Submit a job, get its status, and cancel it. </p>
+<p>The final mode of operation is to create a resource allocation 
+and spawn job steps within that allocation. 
+The <span class="commandline">salloc</span> command is used
+to create a resource allocation and typically start a shell within 
+that allocation. 
+One or more job steps would typically be executed within that allocation 
+using the srun command to launch the tasks. 
+Finally the shell created by salloc would be terminated using the 
+<i>exit</i> command.
+In this example we will also use the <span class="commandline">sbcast</span> 
+command to transfer the executable program to local storage, /tmp/joe.a.out,
+on the allocated nodes (1024 nodes in this example).
+After executing the program, we delete it from local storage.</p>
 <pre>
-adev0: sbatch my.sleeper
+tux0: salloc -N1024 bash
+salloc: Granted job allocation 471
+$ sbcast a.out /tmp/joe.a.out
+$ srun /tmp/joe.a.out
+Result is 471
+$ srun rm /tmp/joe.a.out
+$ exit
+salloc: Relinquishing job allocation 471
+</pre>
+
+<p>In this example, we submit a batch job, get its status, and cancel it. </p>
+<pre>
+adev0: sbatch test
 srun: jobid 473 submitted
 
 adev0: squeue
-  JOBID PARTITION NAME     USER  ST TIME  NODES NODELIST(REASON)
-    473 batch     my.sleep jette R  00:00 1     adev9
+JOBID PARTITION NAME USER ST TIME  NODES NODELIST(REASON)
+  473 batch     test jill R  00:00 1     adev9
  
 adev0: scancel 473
 
 adev0: squeue
-  JOBID PARTITION NAME     USER  ST TIME  NODES NODELIST(REASON)
-</pre>
-
-<p>Get the SLURM partition and node status.</p>
-<pre>
-adev0: sinfo
-PARTITION AVAIL  TIMELIMIT NODES  STATE NODELIST
-debug     up     00:30:00      8   idle adev[0-7]
-batch     up     12:00:00      1   down adev8
-                 12:00:00      7   idle adev[9-15]
-
+JOBID PARTITION NAME USER ST TIME  NODES NODELIST(REASON)
 </pre>
 <p class="footer"><a href="#top">top</a></p>
 
@@ -236,13 +368,15 @@ tasks. When using <span class="commandline">salloc</span> command,
 <span class="commandline">mpirun</span>'s -nolocal option is recommended. 
 For example:
 <pre>
-$ salloc -n4 sh    # allocates 4 processors and spawns shell for job
+$ salloc -n4 sh    # allocates 4 processors 
+                   # and spawns shell for job
 &gt; mpirun -np 4 -nolocal a.out
-&gt; exit          # exits shell spawned by initial salloc command
+&gt; exit             # exits shell spawned by 
+                   # initial salloc command
 </pre>
 <p>Note that any direct use of <span class="commandline">srun</span>
 will only launch one task per node when the LAM/MPI plugin is used.
-To launch more than one task per node usng the
+To launch more than one task per node using the
 <span class="commandline">srun</span> command, the <i>--mpi=none</i>
 option will be required to explicitly disable the LAM/MPI plugin.</p>
 
@@ -264,7 +398,8 @@ the maximum number of tasks required for the job. Then execute the
 Do not directly execute the <span class="commandline">srun</span> command 
 to launch LAM/MPI tasks. For example: 
 <pre>
-$ salloc -n16 sh  # allocates 16 processors and spawns shell for job
+$ salloc -n16 sh  # allocates 16 processors 
+                  # and spawns shell for job
 &gt; lamboot
 &gt; mpirun -np 16 foo args
 1234 foo running on adev0 (o)
@@ -272,11 +407,12 @@ $ salloc -n16 sh  # allocates 16 processors and spawns shell for job
 etc.
 &gt; lamclean
 &gt; lamhalt
-&gt; exit         # exits shell spawned by initial srun command
+&gt; exit            # exits shell spawned by 
+                  # initial salloc command
 </pre>
 <p>Note that any direct use of <span class="commandline">srun</span> 
 will only launch one task per node when the LAM/MPI plugin is configured
-as the default plugin.  To launch more than one task per node usng the 
+as the default plugin.  To launch more than one task per node using the 
 <span class="commandline">srun</span> command, the <i>--mpi=none</i>
 option would be required to explicitly disable the LAM/MPI plugin
 if that is the system default.</p>
@@ -304,15 +440,15 @@ $ srun -n20 a.out
 <b>NOTES:</b>
 <ul>
 <li>Some MPICH2 functions are not currently supported by the PMI 
-libary integrated with SLURM</li>
+library integrated with SLURM</li>
 <li>Set the environment variable <b>PMI_DEBUG</b> to a numeric value 
-of 1 or higher for the PMI libary to print debugging information</li>
+of 1 or higher for the PMI library to print debugging information</li>
 </ul></p>
 
 <p><a href="http://www.myri.com/scs/download-mpichgm.html"><b>MPICH-GM</b></a>
 jobs can be launched directly by <b>srun</b> command.
 SLURM's <i>mpichgm</i> MPI plugin must be used to establish communications 
-between the laucnhed tasks. This can be accomplished either using the SLURM 
+between the launched tasks. This can be accomplished either using the SLURM 
 configuration parameter <i>MpiDefault=mpichgm</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=mpichgm</i> option.
 <pre>
@@ -323,7 +459,7 @@ $ srun -n16 --mpi=mpichgm a.out
 <p><a href="http://www.myri.com/scs/download-mpichmx.html"><b>MPICH-MX</b></a>
 jobs can be launched directly by <b>srun</b> command.
 SLURM's <i>mpichmx</i> MPI plugin must be used to establish communications
-between the laucnhed tasks. This can be accomplished either using the SLURM
+between the launched tasks. This can be accomplished either using the SLURM
 configuration parameter <i>MpiDefault=mpichmx</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=mpichmx</i> option.
 <pre>
@@ -334,7 +470,7 @@ $ srun -n16 --mpi=mpichmx a.out
 <p><a href="http://mvapich.cse.ohio-state.edu/"><b>MVAPICH</b></a>
 jobs can be launched directly by <b>srun</b> command.
 SLURM's <i>mvapich</i> MPI plugin must be used to establish communications 
-between the laucnhed tasks. This can be accomplished either using the SLURM 
+between the launched tasks. This can be accomplished either using the SLURM 
 configuration parameter <i>MpiDefault=mvapich</i> in <b>slurm.conf</b>
 or srun's <i>--mpi=mvapich</i> option.
 <pre>
@@ -353,7 +489,7 @@ documentation for "CQ or QP Creation failure".</p>
 <p><a href="http://nowlab.cse.ohio-state.edu/projects/mpi-iba"><b>MVAPICH2</b></a>
 jobs can be launched directly by <b>srun</b> command.
 SLURM's <i>none</i> MPI plugin must be used to establish communications 
-between the laucnhed tasks. This can be accomplished either using the SLURM 
+between the launched tasks. This can be accomplished either using the SLURM 
 configuration parameter <i>MpiDefault=none</i> in <b>slurm.conf</b> 
 or srun's <i>--mpi=none</i> option. The program must also be linked with
 SLURM's implementation of the PMI library so that tasks can communicate
@@ -432,6 +568,6 @@ sbatch: Submitted batch job 1234
 tasks. These tasks are not managed by SLURM since they are launched 
 outside of its control.</p>
  
-<p style="text-align:center;">Last modified 19 September 2007</p>
+<p style="text-align:center;">Last modified 2 June 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/quickstart_admin.shtml b/doc/html/quickstart_admin.shtml
index 1549621fd092c1faac6a9fb5ddf22afdba34324d..6b45466871b56cb6afeeb4c322c845f96901528b 100644
--- a/doc/html/quickstart_admin.shtml
+++ b/doc/html/quickstart_admin.shtml
@@ -19,7 +19,8 @@ and <i>--sysconfdir=</i></li>
 header files, etc.</li>
 <li>Build a configuration file using your favorite web browser and
 <i>doc/html/configurator.html</i>.<br>
-NOTE: The <i>SlurmUser</i> must be created as needed prior to starting SLURM.<br>
+NOTE: The <i>SlurmUser</i> must be created as needed prior to starting SLURM
+and must exist on all nodes of the cluster.<br>
 NOTE: The parent directories for SLURM's log files, process ID files,
 state save directories, etc. are not created by SLURM.
 They must be created and made writable by <i>SlurmUser</i> as needed prior to 
@@ -164,6 +165,8 @@ controller moves (to or from backup controller) or is restarted.</p>
 <p>We recommend that you create a Unix user <i>slurm</i> for use by 
 <b>slurmctld</b>. This user name will also be specified using the 
 <b>SlurmUser</b> in the slurm.conf configuration file.
+This user must exist on all nodes of the cluster for authentication 
+of communications.
 Note that files and directories used by <b>slurmctld</b> will need to be 
 readable or writable by the user <b>SlurmUser</b> (the slurm configuration 
 files must be readable; the log file directory and state save directory 
@@ -610,6 +613,6 @@ in the NEWS file.
 
 </pre> <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 28 April 2008</p>
+<p style="text-align:center;">Last modified 5 June 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index 2e5ffd5470067d4f3e4fde636d1960253f8672d4..79ff195ada17116514c895aac1cc3a6c90a0b188 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -29,6 +29,7 @@
 <li>Chris Dunlap (LLNL)</li>
 <li>Joey Ekstrom (LLNL/Bringham Young University)</li>
 <li>Jim Garlick (LLNL)</li>
+<li>Didier Gazen (Laboratoire d'Aerologie, France)</li>
 <li>Mark Grondona (LLNL)</li>
 <li>Takao Hatazaki (HP, Japan)</li>
 <li>Matthieu Hautreux (CEA, France)</li>
@@ -49,6 +50,7 @@ Networking, Italy)</li>
 <li>Daniel Palermo (HP)</li>
 <li>Dan Phung (LLNL/Columbia University)</li>
 <li>Ashley Pittman (Quadrics)</li>
+<li>Vijay Ramasubramanian (University of Maryland) </li>
 <li>Andy Riebs (HP)</li>
 <li>Asier Roa (Barcelona Supercomputer Center, Spain)<li>
 <li>Miguel Ros (Barcelona Supercomputer Center, Spain)<li>
@@ -60,6 +62,6 @@ Networking, Italy)</li>
 <li>Anne-Marie Wunderlin (Bull)</li>
 </ul>
 
-<p style="text-align:center;">Last modified 22 May 2008</p>
+<p style="text-align:center;">Last modified 20 June 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index 74b2b16b526b16307289ef35f9bc47fca9e9bd5b..d6874d63a78ffaeeb0861e565c9ab5e5f21510e3 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -1,4 +1,4 @@
-.TH SACCTMGR "1" "May 2008" "sacctmgr 1.3" "Slurm components"
+.TH SACCTMGR "1" "June 2008" "sacctmgr 1.3" "Slurm components"
 
 .SH "NAME"
 sacctmgr \- Used to view and modify Slurm account information.
@@ -333,9 +333,9 @@ To clear a previously set value use the modify command with a new value of \-1.
 .br
 > sacctmgr create account name=physics   parent=science fairshare=20
 .br
-> sacctmgr create user name=adam cluster=tux bank=physics fairshare=10
+> sacctmgr create user name=adam cluster=tux account=physics fairshare=10
 .br
-> sacctmgr modify user with name=adam cluster=tux bank=physics \
+> sacctmgr modify user with name=adam cluster=tux account=physics \
 .br
   set maxjobs=2 maxtime=30:00
 .ec
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 07b2eabcefed541a983963c34f52dec6842f4f31..b1be91d9f6e2c539ac14fc09d78d0e5eb27c54e6 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -275,7 +275,7 @@ The options include a number representing the size of the task block.
 This is followed by an optional specification of the task distribution 
 scheme within a block of tasks and between the blocks of tasks.
 For more details (including examples and diagrams), please see
-.ad l
+.na
 .nh
 https://computing.llnl.gov/linux/slurm/mc_support.html and
 https://computing.llnl.gov/linux/slurm/dist_plane.html.
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 18e38369b1a697f9e017a8fc1c8c8c2a497c8b60..77b50cfd50f4c6f62c7078dfbcaba50fdfa6f870 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -299,7 +299,7 @@ The options include a number representing the size of the task block.
 This is followed by an optional specification of the task distribution 
 scheme within a block of tasks and between the blocks of tasks.
 For more details (including examples and diagrams), please see
-.ad l
+.na
 .nh
 https://computing.llnl.gov/linux/slurm/mc_support.html and
 https://computing.llnl.gov/linux/slurm/dist_plane.html.
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index b522252abe00d574f9e1cd68654c085affdb1e25..6ebadd58c52d5181822316c83998ee5328f40047 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -88,12 +88,14 @@ By default, output is right justified.
 .RE
 
 .IP
+Note that many of these \fItype\fR specifications are valid 
+only for jobs while others are valid only for job steps.
 Valid \fItype\fR specifications include: 
 
 .RS
 .TP 4
 \fB%a\fR
-Account associated with the job
+Account associated with the job.
 .TP
 \fB%A\fR
 Number of tasks created by a job step.
@@ -128,30 +130,30 @@ Job dependency. This job will not begin execution until the dependent job
 completes.  A value of zero implies this job has no dependencies.
 .TP
 \fB%f\fR 
-Features required by the job
+Features required by the job.
 .TP
 \fB%g\fR 
-Group name
+Group name of the job.
 .TP
 \fB%G\fR
-Group ID
+Group ID of the job.
 .TP
 \fB%h\fR
-Can the nodes allocated to the job be shared with other jobs
+Can the nodes allocated to the job be shared with other jobs.
 .TP
 \fB%H\fR 
 Minimum number of sockets per node requested by the job.
 This reports the value of the \fBsrun \-\-minsockets\fR option.
 .TP
 \fB%i\fR
-Job or job step id
+Job or job step id.
 .TP
 \fB%I\fR 
 Minimum number of cores per socket requested by the job.
 This reports the value of the \fBsrun \-\-mincores\fR option.
 .TP
 \fB%j\fR
-Job or job step name
+Job or job step name.
 .TP
 \fB%J\fR 
 Minimum number of threads per core requested by the job.
@@ -186,17 +188,17 @@ in the node count being greater than the number of listed nodes.
 Minimum number of nodes requested by the job.
 .TP
 \fB%O\fR
-Are contiguous nodes requested by the job
+Are contiguous nodes requested by the job.
 .TP
 \fB%p\fR
 Priority of the job (converted to a floating point number between 0.0 and 1.0).
 Also see \fB%Q\fR.
 .TP
 \fB%P\fR 
-Partition of the job or job step
+Partition of the job or job step.
 .TP
 \fB%q\fR 
-Comment associated with the job
+Comment associated with the job.
 .TP
 \fB%Q\fR
 Priority of the job (generally a very large unsigned integer).
@@ -215,7 +217,7 @@ For all other job states: the list of allocate nodes.
 See the \fBJOB REASON CODES\fR section below for more information.
 .TP
 \fB%s\fR 
-Node selection plugin specific data. Possible data includes:
+Node selection plugin specific data for a job. Possible data includes:
 Geometry requirement of resource allocation (X,Y,Z dimensions), 
 Connection type (TORUS, MESH, or NAV == torus else mesh), 
 Permit rotation of geometry (yes or no), 
@@ -223,7 +225,7 @@ Node use (VIRTUAL or COPROCESSOR),
 etc.
 .TP
 \fB%S\fR 
-Start time of the job or job step
+Start time of the job or job step.
 .TP
 \fB%t\fR 
 Job state, compact form:
@@ -238,25 +240,26 @@ and NODE_FAIL.
 See the \fBJOB STATE CODES\fR section below for more information.
 .TP
 \fB%u\fR 
-User name
+User name for a job or job step.
 .TP
 \fB%U\fR 
-User ID
+User ID for a job or job step.
 .TP
 \fB%x\fR 
-List of node names explicitly excluded by the job
+List of node names explicitly excluded by the job.
 .TP
 \fB%X\fR 
-Number of requested sockets per node
+Number of requested sockets per node for the job.
 .TP
 \fB%Y\fR 
-Number of requested cores per socket
+Number of requested cores per socket for the job.
 .TP
 \fB%Z\fR 
-Number of requested threads per core
+Number of requested threads per core for the job.
 .TP
 \fB%z\fR 
-Extended processor information: number of requested sockets, cores, threads (S:C:T) per node
+Extended processor information: number of requested sockets, cores, 
+threads (S:C:T) per node for the job.
 .RE
 
 .TP
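As a usage sketch (not part of this patch), the job-oriented type specifications described above can be combined into an squeue output format; for example, a string similar to squeue's default format:

    squeue -o "%.7i %.9P %.8j %.8u %.2t %.10M %.6D %R"

Here %i, %P, %j, %u and %t are the job id, partition, name, user name and compact state documented above; the remaining fields are assumed to keep their usual meanings (time used, node count, and nodelist/reason).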
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index fb26318c4010335afa3940923593c69da6188e0b..8e1db34460e306ef4545775de40e09d026b6a014 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -1,4 +1,5 @@
-.TH "slurm.conf" "5" "May 2008" "slurm.conf 1.3" "Slurm configuration file"
+.TH "slurm.conf" "5" "June 2008" "slurm.conf 1.3" "Slurm configuration file"
+
 .SH "NAME"
 slurm.conf \- Slurm configuration file 
 .SH "DESCRIPTION"
@@ -242,14 +243,15 @@ Also see \fBAccountingStoragePort\fR and \fBJobCompPort\fR.
 
 .TP
 \fBDefaultStorageType\fR
-.ad l
 Define the accounting and job completion storage mechanism type.
 Acceptable values at present include 
 "filetxt", "gold", "mysql", "none", "pgsql", and "slurmdbd".
 The value "filetxt" indicates that records will be written to a the file.
 The value "gold" indicates that records will be written to Gold
 (http://www.clusterresources.com/pages/products/gold-allocation-manager.php),
+.na
 which maintains its own database.
+.ad
 The value "mysql" indicates that accounting records will be written to 
 a mysql database.
 The default value is "none", which means that records are not maintained. 
@@ -258,7 +260,6 @@ database.
 The value "slurmdbd" indicates that records will be written to SlurmDbd,
 which maintains its own database. See "man slurmdbd for more information".
 Also see \fBAccountingStorageType\fR  and \fBJobCompType\fR.
-.ad
 
 .TP
 \fBDefaultStorageUser\fR
@@ -273,6 +274,14 @@ If set to "YES" then user root will be prevented from running any jobs.
 The default value is "NO", meaning user root will be able to execute jobs.
 \fBDisableRootJobs\fR may also be set by partition.
 
+.TP
+\fBEnforcePartLimits\fR
+If set to "YES" then jobs which exceed a partition's size and/or time limits
+will be rejected at submission time. If set to "NO" then the job will be 
+accepted and remain queued until the partition limits are altered.
+The default value is "NO".
+
+.TP
 \fBEpilog\fR
 Fully qualified pathname of a script to execute as user root on every 
 node when a user's job completes (e.g. "/usr/local/slurm/epilog"). This may 
@@ -332,7 +340,9 @@ The default value is 1.
 Used for Moab scheduled jobs only. Controls how long job should wait
 in seconds for loading the user's environment before attempting to 
 load it from a cache file. Applies when the srun or sbatch 
-\fI--get-user-env\fR option is used. Default value is 2 seconds.
+\fI--get-user-env\fR option is used. If set to 0 then always load 
+the user's environment from the cache file.
+The default value is 2 seconds.
 
 .TP
 \fBHealthCheckInterval\fR
@@ -847,6 +857,8 @@ NOTE: This implies \fIShared=YES\fR or \fIShared=FORCE\fR for all partitions.
 \fBSlurmUser\fR
 The name of the user that the \fBslurmctld\fR daemon executes as. 
 For security purposes, a user other than "root" is recommended.
+This user must exist on all nodes of the cluster for authentication 
+of communications between SLURM components.
 The default value is "root". 
 
 .TP
@@ -1246,9 +1258,13 @@ The node configuration specified the following information:
 \fBNodeName\fR
 Name that SLURM uses to refer to a node (or base partition for 
 BlueGene systems). 
-Typically this would be the string that "/bin/hostname \-s" 
-returns, however it may be an arbitrary string if 
-\fBNodeHostname\fR is specified.
+Typically this would be the string that "/bin/hostname \-s" returns.
+It may also be the fully qualified domain name as returned by "/bin/hostname \-f"
+(e.g. "foo1.bar.com"), although that may prevent use of hostlist expressions
+(the numeric portion in brackets must be at the end of the string).
+Fully qualified domain names are incompatible with the
+switch/elan and switch/federation plugins at this time.
+It may also be an arbitrary string if \fBNodeHostname\fR is specified.
 If the \fBNodeName\fR is "DEFAULT", the values specified 
 with that record will apply to subsequent node specifications   
 unless explicitly set to other values in that node record or 
@@ -1261,7 +1277,12 @@ considered adjacent in the computer.
 
 .TP
 \fBNodeHostname\fR
-The string that "/bin/hostname \-s" returns. 
+Typically this would be the string that "/bin/hostname \-s" returns. 
+It may also be the fully qualified domain name as returned by "/bin/hostname \-f"
+(e.g. "foo1.bar.com"), although that may prevent use of hostlist expressions
+(the numeric portion in brackets must be at the end of the string).
+Fully qualified domain names are incompatible with the
+switch/elan and switch/federation plugins at this time.
 A node range expression can be used to specify a set of nodes.
 If an expression is used, the number of nodes identified by 
 \fBNodeHostname\fR on a line in the configuration file must 
@@ -1542,7 +1563,9 @@ memory as a consumable resource and the \fB\-\-mem\fR option
 should be used for job allocations.
 For more information see the following web page:
 \fIhttps://computing.llnl.gov/linux/slurm/cons_res_share.html\fR.
+.na
 Possible values for \fBShared\fR are "EXCLUSIVE", "FORCE", "YES", and "NO".
+.ad
 .RS
 .TP 12
 \fBEXCLUSIVE\fR
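
A hypothetical slurm.conf fragment exercising the options documented above (the host
and domain names are placeholders, and unrelated required parameters are omitted):

	EnforcePartLimits=YES
	GetEnvTimeout=0
	NodeName=foo1 NodeHostname=foo1.bar.com
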
diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5
index 0b55090de0d388f068c1a228edf7688c2c1b2af0..e1435514f311c7814bb8445855c702045fefb6ac 100644
--- a/doc/man/man5/slurmdbd.conf.5
+++ b/doc/man/man5/slurmdbd.conf.5
@@ -1,4 +1,4 @@
-.TH "slurmdbd.conf" "5" "February 2008" "slurmdbd.conf 1.3" "Slurm configuration file"
+.TH "slurmdbd.conf" "5" "June 2008" "slurmdbd.conf 1.3" "Slurm configuration file"
 .SH "NAME"
 slurmdbd.conf \- Slurm Database Daemon (SlurmDBD) configuration file 
 
@@ -21,6 +21,32 @@ This file should be protected from unauthorized access since it
 contains a database password.
 The overall configuration parameters available include:
 
+.TP
+\fBAllowView\fR
+This controls who can view accounting records. 
+A value of "user" prevents normal users from viewing accounting records
+that are not generated directly by them (preventing them from viewing 
+any other user's jobs).
+A value of "account" prevents normal users from viewing accounting 
+records that are not generated by users in the same account.
+A value of "none" lets any user view accounting records generated by 
+any other user.
+The default value is "none".
+
+.TP
+\fBArchiveAge\fR
+Data older than this age is moved out of the database to an archive.
+The value is a number of days.
+If zero, the data is never archived.
+The default value is zero.
+
+.TP
+\fBArchiveScript\fR
+This script is executed periodically in order to transfer accounting
+records out of the database into an archive. The script is executed 
+with a single argument, the value of \fBArchiveTime\fR as described
+below.
+
 .TP
 \fBAuthInfo\fR
 Additional information to be used for authentication of communications 
@@ -84,6 +110,14 @@ Values from 0 to 7 are legal, with `0' being "quiet" operation and
 `7' being insanely verbose.
 The default value is 3.
 
+.TP
+\fBJobPurge\fR
+Individual job records over this age are purged from the database.
+Aggregated information will be preserved indefinitely.
+The value is a number of days.
+If zero, job records are never purged.
+The default value is 360 days.
+
 .TP
 \fBLogFile\fR
 Fully qualified pathname of a file into which the Slurm Database Daemon's 
@@ -116,6 +150,14 @@ and have the same user ID as the hosts on which \fBslurmctld\fR execute.
 For security purposes, a user other than "root" is recommended.
 The default value is "root". 
 
+.TP
+\fBStepPurge\fR
+Individual job step records over this age are purged from the database.
+Aggregated information will be preserved indefinitely.
+The value is a number of days.
+If zero, job step records are never purged.
+The default value is 30 days.
+
 .TP
 \fBStorageHost\fR
 Define the name of the host the database is running where we are going
@@ -168,6 +210,10 @@ with to store the job accounting data.
 .br
 #
 .br
+ArchiveAge=365   # keep 1 year of data online
+.br
+ArchiveScript=/usr/sbin/slurm.dbd.archive
+.br
 AuthInfo=/var/run/munge/munge.socket.2
 .br
 AuthType=auth/munge
@@ -176,6 +222,10 @@ DbdHost=db_host
 .br
 DebugLevel=4
 .br
+JobPurge=90
+.br
+StepPurge=30
+.br
 LogFile=/var/log/slurmdbd.log
 .br
 PidFile=/var/tmp/jette/slurmdbd.pid
diff --git a/slurm.spec b/slurm.spec
index 9448a966345edd1ed6ad3ea8bad152fa6eb881f5..57293ea4226b70fef60806a0b4a08981af689668 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -1,4 +1,4 @@
-# $Id: slurm.spec 14109 2008-05-22 16:26:23Z jette $
+# $Id: slurm.spec 14267 2008-06-16 18:13:01Z jette $
 #
 # Note that this package is not relocatable
 
@@ -64,22 +64,19 @@
 %endif
 
 Name:    slurm
-Version: 1.3.3
-Release: 1
+Version: 1.3.4
+Release: 1%{?dist}
 
 Summary: Simple Linux Utility for Resource Management
 
 License: GPL 
 Group: System Environment/Base
-Source: slurm-1.3.3.tar.bz2
+Source: slurm-1.3.4.tar.bz2
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
 URL: https://computing.llnl.gov/linux/slurm/
 
 Requires: slurm-plugins
 
-%ifnos aix
-BuildRequires: ncurses-devel
-%endif
 %ifos linux
 BuildRequires: python
 %endif
@@ -234,7 +231,7 @@ SLURM process tracking plugin for SGI job containers.
 #############################################################################
 
 %prep
-%setup -n slurm-1.3.3
+%setup -n slurm-1.3.4
 
 %build
 %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \
@@ -247,8 +244,6 @@ SLURM process tracking plugin for SGI job containers.
 
 make %{?_smp_mflags} 
 
-
-
 %install
 rm -rf "$RPM_BUILD_ROOT"
 mkdir -p "$RPM_BUILD_ROOT"
@@ -276,45 +271,13 @@ if [ -d /etc/init.d ]; then
    echo "/etc/init.d/slurm"    >> $LIST
 fi
 
-LIST=./munge.files
-touch $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/auth_munge.so   &&
-  echo %{_libdir}/slurm/auth_munge.so             >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_munge.so &&
-  echo %{_libdir}/slurm/crypto_munge.so           >> $LIST
-
-LIST=./switch_elan.files
-touch $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/switch_elan.so &&
-  echo %{_libdir}/slurm/switch_elan.so            >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/proctrack_rms.so &&
-  echo %{_libdir}/slurm/proctrack_rms.so          >> $LIST
-
 %if %{slurm_with aix}
 install -D -m644 etc/federation.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/federation.conf.example
-LIST=./aix_federation.files
-touch $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/switch_federation.so &&
-  echo %{_libdir}/slurm/switch_federation.so      >> $LIST
-test -f  $RPM_BUILD_ROOT/%{_libdir}/slurm/proctrack_aix.so &&
-  echo %{_libdir}/slurm/proctrack_aix.so          >> $LIST
-test -f  $RPM_BUILD_ROOT/%{_libdir}/slurm/checkpoint_aix.so &&
-  echo %{_libdir}/slurm/checkpoint_aix.so         >> $LIST
-echo "%config %{_sysconfdir}/federation.conf.example" >> $LIST
 %endif
 
-LIST=./perlapi.files
-touch $LIST
-test -f $RPM_BUILD_ROOT/%{_perldir}/Slurm.pm &&
-  echo "%{_perldir}/Slurm.pm"                 >> $LIST
-test -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/Slurm.so &&
-  echo "%{_perldir}/auto/Slurm/Slurm.so"      >> $LIST
-test -f $RPM_BUILD_ROOT/%{_mandir}/man3/Slurm.3 &&
-echo "%{_mandir}/man3/Slurm.3"                 >> $LIST
-test -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/Slurm.bs &&
-  echo "%{_perldir}/auto/Slurm/Slurm.bs"      >> $LIST
-test -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/autosplit.ix &&
-  echo "%{_perldir}/auto/Slurm/autosplit.ix"      >> $LIST
+%if %{slurm_with bluegene}
+install -D -m644 etc/bluegene.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/bluegene.conf.example
+%endif
 
 LIST=./slurmdbd.files
 touch $LIST
@@ -330,51 +293,6 @@ test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_openssl.so &&
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/accounting_storage_gold.so
    echo %{_libdir}/slurm/accounting_storage_gold.so >> $LIST
 
-# Build file lists for optional plugin packages
-for plugin in auth_authd; do
-   LIST=./${plugin}.files
-   touch $LIST
-   test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/${plugin}.so &&
-     echo %{_libdir}/slurm/${plugin}.so > $LIST
-done
-
-
-
-
-
-
-LIST=./torque.files
-touch $LIST
-echo "%{_bindir}/pbsnodes"                    >> $LIST
-echo "%{_bindir}/qdel"                        >> $LIST
-echo "%{_bindir}/qhold"                       >> $LIST
-echo "%{_bindir}/qrls"                        >> $LIST
-echo "%{_bindir}/qstat"                       >> $LIST
-echo "%{_bindir}/qsub"                        >> $LIST
-echo "%{_bindir}/mpiexec"                     >> $LIST
-
-
-%if %{slurm_with bluegene}
-install -D -m644 etc/bluegene.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/bluegene.conf.example
-LIST=./bluegene.files
-touch $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/select_bluegene.so &&
-  echo "%{_libdir}/slurm/select_bluegene.so"      >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/libsched_if64.so &&
-  echo "%{_libdir}/slurm/libsched_if64.so"        >> $LIST
-echo "%{_mandir}/man5/bluegene.*"                 >> $LIST
-echo "%{_sbindir}/slurm_epilog"                   >> $LIST
-echo "%{_sbindir}/slurm_prolog"                   >> $LIST
-echo "%{_sbindir}/sfree"                          >> $LIST
-echo "%config %{_sysconfdir}/bluegene.conf.example" >> $LIST
-%endif
-
-%if %{slurm_with sgijob}
-LIST=./sgi-job.files
-touch $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/proctrack_sgi_job.so &&
-echo "%{_libdir}/slurm/proctrack_sgi_job.so" >> $LIST
-%endif
 
 #############################################################################
 
@@ -421,7 +339,7 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/libpmi.la
 %{_libdir}/libslurm.a
 %{_libdir}/libslurm.la
-%{_mandir}/man3/*
+%{_mandir}/man3/slurm_*
 #############################################################################
 
 %if %{slurm_with auth_none}
@@ -432,30 +350,48 @@ rm -rf $RPM_BUILD_ROOT
 #############################################################################
 
 %if %{slurm_with munge}
-%files -f munge.files munge
+%files munge
 %defattr(-,root,root)
+%{_libdir}/slurm/auth_munge.so
+%{_libdir}/slurm/crypto_munge.so
 %endif
 #############################################################################
 
 %if %{slurm_with authd}
-%files -f auth_authd.files auth-authd
+%files auth-authd
 %defattr(-,root,root)
+%{_libdir}/slurm/auth_authd.so
 %endif
 #############################################################################
 
 %if %{slurm_with bluegene}
-%files -f bluegene.files bluegene
+%files bluegene
 %defattr(-,root,root)
+%{_libdir}/slurm/select_bluegene.so
+%{_libdir}/slurm/libsched_if64.so
+%{_mandir}/man5/bluegene.*
+%{_sbindir}/slurm_epilog
+%{_sbindir}/slurm_prolog
+%{_sbindir}/sfree
+%config %{_sysconfdir}/bluegene.conf.example
 %endif
 #############################################################################
 
-%files -f perlapi.files perlapi
+%files perlapi
 %defattr(-,root,root)
+%{_perldir}/Slurm.pm
+%{_perldir}/auto/Slurm/Slurm.so
+%{_mandir}/man3/Slurm.*
+%{_perldir}/auto/Slurm/Slurm.bs
+%{_perldir}/auto/Slurm/autosplit.ix
+
 #############################################################################
 
 %if %{slurm_with elan}
-%files -f switch_elan.files switch-elan
+%files switch-elan
 %defattr(-,root,root)
+%{_libdir}/slurm/switch_elan.so
+%{_libdir}/slurm/proctrack_rms.so
 %endif
 #############################################################################
 
@@ -508,19 +444,31 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/task_none.so
 #############################################################################
 
-%files -f torque.files torque
+%files torque
 %defattr(-,root,root)
+%{_bindir}/pbsnodes
+%{_bindir}/qdel
+%{_bindir}/qhold
+%{_bindir}/qrls
+%{_bindir}/qstat
+%{_bindir}/qsub
+%{_bindir}/mpiexec
 #############################################################################
 
 %if %{slurm_with aix}
-%files -f aix_federation.files aix-federation
+%files aix-federation
 %defattr(-,root,root)
+%{_libdir}/slurm/switch_federation.so 
+%{_libdir}/slurm/proctrack_aix.so
+%{_libdir}/slurm/checkpoint_aix.so
+%config %{_sysconfdir}/federation.conf.example
 %endif
 #############################################################################
 
 %if %{slurm_with sgijob}
-%files -f sgi-job.files proctrack-sgi-job
+%files proctrack-sgi-job
 %defattr(-,root,root)
+%{_libdir}/slurm/proctrack_sgi_job.so
 %endif
 #############################################################################
 
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 04fc802e251d02e5de4e59bf72e51ced8a8ade06..6f5573ed26a6f3fb5c1aee5814d8c9988c2d95a0 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -949,6 +949,8 @@ typedef struct slurm_ctl_conf {
 	char *crypto_type;	/* cryptographic signature plugin */
 	uint32_t def_mem_per_task; /* default MB memory per spawned task */
 	uint16_t disable_root_jobs; /* if set then user root can't run jobs */
+	uint16_t enforce_part_limits;	/* if set, reject job exceeding 
+					 * partition size and/or time limits */
 	char *epilog;		/* pathname of job epilog */
 	uint32_t epilog_msg_time;  /* usecs for slurmctld to process an
 				 * epilog complete message */
diff --git a/slurm/slurm_errno.h b/slurm/slurm_errno.h
index 3e9b7e9b7a2466ca387a85cecf236e5a58d873d1..eafe21aa7c657f4acd2aaf241168eedfae592a85 100644
--- a/slurm/slurm_errno.h
+++ b/slurm/slurm_errno.h
@@ -96,6 +96,7 @@ enum {
 	SLURM_PROTOCOL_INSANE_MSG_LENGTH,
 	SLURM_MPI_PLUGIN_NAME_INVALID,
 	SLURM_MPI_PLUGIN_PRELAUNCH_SETUP_FAILED,
+	SLURM_PLUGIN_NAME_INVALID,
 
 	/* communication failures to/from slurmctld */
 	SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR =     1800,
@@ -157,6 +158,7 @@ enum {
 	ESLURM_INVALID_LICENSES,
 	ESLURM_NEED_RESTART,
 	ESLURM_ACCOUNTING_POLICY,
+	ESLURM_INVALID_TIME_LIMIT,
 
 	/* switch specific error codes, specific values defined in plugin module */
 	ESLURM_SWITCH_MIN = 3000,
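
A minimal client-side sketch of checking the new error code. It assumes (this patch does not
confirm it) that ESLURM_INVALID_TIME_LIMIT is what a submission rejected under
EnforcePartLimits reports, and uses the existing slurm_get_errno()/slurm_strerror() helpers:

	job_desc_msg_t job_desc;
	submit_response_msg_t *resp = NULL;

	slurm_init_job_desc_msg(&job_desc);
	/* ... fill in the request, including a time_limit ... */
	if (slurm_submit_batch_job(&job_desc, &resp) != SLURM_SUCCESS) {
		if (slurm_get_errno() == ESLURM_INVALID_TIME_LIMIT)
			fprintf(stderr, "submit rejected: %s\n",
				slurm_strerror(slurm_get_errno()));
	}
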
diff --git a/src/api/config_info.c b/src/api/config_info.c
index e649accd835dd8200a132199883a7b77c4ad8652..db17884c6c42774433f094316ec80a200d6f8236 100644
--- a/src/api/config_info.c
+++ b/src/api/config_info.c
@@ -162,6 +162,13 @@ void slurm_print_ctl_conf ( FILE* out,
 		fprintf(out, "DisableRootJobs         = YES\n");
 	else
 		fprintf(out, "DisableRootJobs         = NO\n");
+#if 0
+/* Add in Slurm v1.4 */
+	if (slurm_ctl_conf_ptr->enforce_part_limits)
+		fprintf(out, "EnforcePartLimits       = YES\n");
+	else
+		fprintf(out, "EnforcePartLimits       = NO\n");
+#endif
 	fprintf(out, "Epilog                  = %s\n",
 		slurm_ctl_conf_ptr->epilog);
 	fprintf(out, "EpilogMsgTime           = %u\n",
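
The new enforce_part_limits flag is carried in slurm_ctl_conf_t even though its printout stays
under "#if 0" until v1.4. A minimal sketch, assuming the existing slurm_load_ctl_conf() and
slurm_free_ctl_conf() API, of how a client could read the field once the controller reports it:

	slurm_ctl_conf_t *conf = NULL;

	if (slurm_load_ctl_conf((time_t) 0, &conf) == SLURM_SUCCESS) {
		if (conf->enforce_part_limits)
			printf("EnforcePartLimits       = YES\n");
		else
			printf("EnforcePartLimits       = NO\n");
		slurm_free_ctl_conf(conf);
	}
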
diff --git a/src/api/job_info.c b/src/api/job_info.c
index 26bf30965c4ee61be508cb3877c83b3e4ef55b22..dc9c696ebf8f9e21614a3258984ef3a4105d7ab3 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  job_info.c - get/print the job state information of slurm
- *  $Id: job_info.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: job_info.c 14298 2008-06-20 16:45:30Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -126,7 +126,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 	int i, j;
 	char time_str[32], select_buf[122];
 	struct group *group_info = NULL;
-	char tmp1[128], tmp2[128];
+	char tmp1[128], tmp2[128], *tmp3_ptr;
 	char tmp_line[512];
 	char *ionodes = NULL;
 	uint16_t exit_status = 0, term_sig = 0;
@@ -209,11 +209,16 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 		xstrcat(out, "\n   ");
 
 	/****** Line 5 ******/
+	if (job_ptr->job_state == JOB_PENDING)
+		tmp3_ptr = "EligibleTime";
+	else
+		tmp3_ptr = "StartTime";
 	slurm_make_time_str((time_t *)&job_ptr->start_time, time_str,
 		sizeof(time_str));
 	snprintf(tmp_line, sizeof(tmp_line),
-		"JobState=%s StartTime=%s EndTime=",
-		job_state_string(job_ptr->job_state), time_str);
+		"JobState=%s %s=%s EndTime=",
+		job_state_string(job_ptr->job_state), 
+		tmp3_ptr, time_str);
 	xstrcat(out, tmp_line);
 	if ((job_ptr->time_limit == INFINITE) && 
 	    (job_ptr->end_time > time(NULL)))
diff --git a/src/api/step_launch.c b/src/api/step_launch.c
index 2656d598f4bc29fe5f0035972c46f6cd3162ff1a..c74952d17373cd3d16c5e5632bd2f747ae47516a 100644
--- a/src/api/step_launch.c
+++ b/src/api/step_launch.c
@@ -1,9 +1,8 @@
 /*****************************************************************************\
  *  step_launch.c - launch a parallel job step
- *
- *  $Id: step_launch.c 14142 2008-05-28 20:07:50Z jette $
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
+ *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
  *  LLNL-CODE-402394.
@@ -189,7 +188,7 @@ int slurm_step_launch (slurm_step_ctx_t *ctx, char *launcher_host,
 	launch.job_step_id = ctx->step_resp->job_step_id;
 	if (params->env == NULL) {
 		/* if the user didn't specify an environment, grab the
-		   environment of the running process */
+		 * environment of the running process */
 		env_array_merge(&env, (const char **)environ);
 	} else {
 		env_array_merge(&env, (const char **)params->env);
@@ -741,10 +740,18 @@ _launch_handler(struct step_launch_state *sls, slurm_msg_t *resp)
 
 	pthread_mutex_lock(&sls->lock);
 
-	for (i = 0; i < msg->count_of_pids; i++) {
-		bit_set(sls->tasks_started, msg->task_ids[i]);
+	if (msg->return_code) {
+		for (i = 0; i < msg->count_of_pids; i++) {
+			error("task %u launch failed: %s", 
+			      msg->task_ids[i], 
+			      slurm_strerror(msg->return_code));
+			bit_set(sls->tasks_started, msg->task_ids[i]);
+			bit_set(sls->tasks_exited, msg->task_ids[i]);
+		}
+	} else {
+		for (i = 0; i < msg->count_of_pids; i++)
+			bit_set(sls->tasks_started, msg->task_ids[i]);
 	}
-
 	if (sls->callback.task_start != NULL)
 		(sls->callback.task_start)(msg);
 
@@ -770,7 +777,7 @@ _exit_handler(struct step_launch_state *sls, slurm_msg_t *exit_msg)
 	pthread_mutex_lock(&sls->lock);
 
 	for (i = 0; i < msg->num_tasks; i++) {
-		debug("task %d done", msg->task_id_list[i]);
+		debug("task %u done", msg->task_id_list[i]);
 		bit_set(sls->tasks_exited, msg->task_id_list[i]);
 	}
 
diff --git a/src/common/Makefile.am b/src/common/Makefile.am
index 8d61a722ee3b1107e91812ab20e5731f9e97e302..b1a86ac22766ca094ee4deae1c5ded7f3f47300e 100644
--- a/src/common/Makefile.am
+++ b/src/common/Makefile.am
@@ -49,6 +49,7 @@ libcommon_la_SOURCES = 			\
 	parse_spec.c parse_spec.h	\
 	plugin.c plugin.h		\
 	plugrack.c plugrack.h		\
+	print_fields.c print_fields.h	\
 	read_config.c read_config.h	\
 	node_select.c node_select.h	\
 	env.c env.h      		\
diff --git a/src/common/Makefile.in b/src/common/Makefile.in
index ae339dd578e880ba3ab2f652890c5d9ec0cbb745..10ada442258550ce65a5988c40b8283a17c154bb 100644
--- a/src/common/Makefile.in
+++ b/src/common/Makefile.in
@@ -85,12 +85,12 @@ am__libcommon_la_SOURCES_DIST = assoc_mgr.c assoc_mgr.h xmalloc.c \
 	safeopen.c safeopen.h bitstring.c bitstring.h mpi.c mpi.h \
 	pack.c pack.h parse_config.c parse_config.h parse_spec.c \
 	parse_spec.h plugin.c plugin.h plugrack.c plugrack.h \
-	read_config.c read_config.h node_select.c node_select.h env.c \
-	env.h slurm_cred.h slurm_cred.c slurm_errno.c \
-	slurm_protocol_api.c slurm_protocol_api.h \
-	slurm_protocol_pack.c slurm_protocol_pack.h \
-	slurm_protocol_util.c slurm_protocol_util.h \
-	slurm_protocol_socket_implementation.c \
+	print_fields.c print_fields.h read_config.c read_config.h \
+	node_select.c node_select.h env.c env.h slurm_cred.h \
+	slurm_cred.c slurm_errno.c slurm_protocol_api.c \
+	slurm_protocol_api.h slurm_protocol_pack.c \
+	slurm_protocol_pack.h slurm_protocol_util.c \
+	slurm_protocol_util.h slurm_protocol_socket_implementation.c \
 	slurm_protocol_socket_common.h slurm_protocol_common.h \
 	slurm_protocol_interface.h slurm_protocol_defs.c \
 	slurm_protocol_defs.h slurm_rlimits_info.h \
@@ -112,17 +112,18 @@ am_libcommon_la_OBJECTS = assoc_mgr.lo xmalloc.lo xassert.lo \
 	xstring.lo xsignal.lo forward.lo strlcpy.lo list.lo net.lo \
 	fd.lo log.lo cbuf.lo safeopen.lo bitstring.lo mpi.lo pack.lo \
 	parse_config.lo parse_spec.lo plugin.lo plugrack.lo \
-	read_config.lo node_select.lo env.lo slurm_cred.lo \
-	slurm_errno.lo slurm_protocol_api.lo slurm_protocol_pack.lo \
-	slurm_protocol_util.lo slurm_protocol_socket_implementation.lo \
-	slurm_protocol_defs.lo slurm_rlimits_info.lo slurmdbd_defs.lo \
-	uid.lo util-net.lo slurm_auth.lo jobacct_common.lo \
-	slurm_accounting_storage.lo slurm_jobacct_gather.lo \
-	slurm_jobcomp.lo switch.lo arg_desc.lo malloc.lo getopt.lo \
-	getopt1.lo $(am__objects_1) slurm_selecttype_info.lo \
-	slurm_resource_info.lo hostlist.lo slurm_step_layout.lo \
-	checkpoint.lo parse_time.lo job_options.lo global_defaults.lo \
-	timers.lo stepd_api.lo proc_args.lo
+	print_fields.lo read_config.lo node_select.lo env.lo \
+	slurm_cred.lo slurm_errno.lo slurm_protocol_api.lo \
+	slurm_protocol_pack.lo slurm_protocol_util.lo \
+	slurm_protocol_socket_implementation.lo slurm_protocol_defs.lo \
+	slurm_rlimits_info.lo slurmdbd_defs.lo uid.lo util-net.lo \
+	slurm_auth.lo jobacct_common.lo slurm_accounting_storage.lo \
+	slurm_jobacct_gather.lo slurm_jobcomp.lo switch.lo arg_desc.lo \
+	malloc.lo getopt.lo getopt1.lo $(am__objects_1) \
+	slurm_selecttype_info.lo slurm_resource_info.lo hostlist.lo \
+	slurm_step_layout.lo checkpoint.lo parse_time.lo \
+	job_options.lo global_defaults.lo timers.lo stepd_api.lo \
+	proc_args.lo
 am__EXTRA_libcommon_la_SOURCES_DIST = unsetenv.c unsetenv.h
 libcommon_la_OBJECTS = $(am_libcommon_la_OBJECTS)
 libcommon_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
@@ -363,6 +364,7 @@ libcommon_la_SOURCES = \
 	parse_spec.c parse_spec.h	\
 	plugin.c plugin.h		\
 	plugrack.c plugrack.h		\
+	print_fields.c print_fields.h	\
 	read_config.c read_config.h	\
 	node_select.c node_select.h	\
 	env.c env.h      		\
@@ -525,6 +527,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugrack.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugstack.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print_fields.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_args.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/safeopen.Plo@am__quote@
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index 1a35ca4322188de0af46159f32675d3cda38c890..ac7ef16dfdea28d5d9d5d8e48a7677664f121a98 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -47,9 +47,44 @@ static List local_association_list = NULL;
 static List local_user_list = NULL;
 static char *local_cluster_name = NULL;
 
+void (*remove_assoc_notify) (acct_association_rec_t *rec) = NULL;
+
 static pthread_mutex_t local_association_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t local_user_lock = PTHREAD_MUTEX_INITIALIZER;
 
+/* locks should be put in place before calling this function */
+static int _set_assoc_parent_and_user(acct_association_rec_t *assoc)
+{
+	if(!assoc) {
+		error("you didn't give me an association");
+		return SLURM_ERROR;
+	}
+
+	if(assoc->parent_id) {
+		acct_association_rec_t *assoc2 = NULL;
+		ListIterator itr = list_iterator_create(local_association_list);
+		while((assoc2 = list_next(itr))) {
+			if(assoc2->id == assoc->parent_id) {
+				assoc->parent_acct_ptr = assoc2;
+				break;
+			}
+		}
+		list_iterator_destroy(itr);
+	}
+	if(assoc->user) {
+		struct passwd *passwd_ptr = getpwnam(assoc->user);
+		if(passwd_ptr) 
+			assoc->uid = passwd_ptr->pw_uid;
+		else
+			assoc->uid = (uint32_t)NO_VAL;	
+	} else {
+		assoc->uid = (uint32_t)NO_VAL;	
+	}
+	//log_assoc_rec(assoc);
+
+	return SLURM_SUCCESS;
+}
+
 static int _get_local_association_list(void *db_conn, int enforce)
 {
 	acct_association_cond_t assoc_q;
@@ -97,31 +132,10 @@ static int _get_local_association_list(void *db_conn, int enforce)
 		}
 	} else {
 		acct_association_rec_t *assoc = NULL;
-		acct_association_rec_t *assoc2 = NULL;
-		struct passwd *passwd_ptr = NULL;
 		ListIterator itr = list_iterator_create(local_association_list);
-		ListIterator itr2 = 
-			list_iterator_create(local_association_list);
 		//START_TIMER;
-		while((assoc = list_next(itr))) {
-			if(assoc->parent_id) {
-				while((assoc2 = list_next(itr2))) {
-					if(assoc2->id == assoc->parent_id) {
-						assoc->parent_acct_ptr = assoc2;
-						break;
-					}
-				}
-				list_iterator_reset(itr2);
-			}
-			if(!assoc->user) {
-				continue;
-			}
-			passwd_ptr = getpwnam(assoc->user);
-			if(passwd_ptr) 
-				assoc->uid = passwd_ptr->pw_uid;
-			//log_assoc_rec(assoc);
-		}
-		list_iterator_destroy(itr2);
+		while((assoc = list_next(itr))) 
+			_set_assoc_parent_and_user(assoc);
 		list_iterator_destroy(itr);
 		//END_TIMER2("load_associations");
 	}
@@ -135,6 +149,7 @@ static int _get_local_user_list(void *db_conn, int enforce)
 	acct_user_cond_t user_q;
 
 	memset(&user_q, 0, sizeof(acct_user_cond_t));
+	user_q.with_coords = 1;
 
 	slurm_mutex_lock(&local_user_lock);
 	if(local_user_list)
@@ -150,14 +165,38 @@ static int _get_local_user_list(void *db_conn, int enforce)
 		} else {
 			return SLURM_SUCCESS;
 		}		
-	} 
+	} else {
+		acct_user_rec_t *user = NULL;
+		struct passwd *passwd_ptr = NULL;
+		ListIterator itr = list_iterator_create(local_user_list);
+		//START_TIMER;
+		while((user = list_next(itr))) {
+			passwd_ptr = getpwnam(user->name);
+			if(passwd_ptr) 
+				user->uid = passwd_ptr->pw_uid;
+			else
+				user->uid = (uint32_t)NO_VAL;
+		}
+		list_iterator_destroy(itr);
+		//END_TIMER2("load_users");
+	}
+	
+
 
 	slurm_mutex_unlock(&local_user_lock);
 	return SLURM_SUCCESS;
 }
 
-extern int assoc_mgr_init(void *db_conn, int enforce)
+extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args)
 {
+	int enforce = 0;
+
+	if(args) {
+		enforce = args->enforce;
+		if(args->remove_assoc_notify)
+			remove_assoc_notify = args->remove_assoc_notify;
+	}
+
 	if(!local_cluster_name && !slurmdbd_conf)
 		local_cluster_name = slurm_get_cluster_name();
 
@@ -172,7 +211,7 @@ extern int assoc_mgr_init(void *db_conn, int enforce)
 	return SLURM_SUCCESS;
 }
 
-extern int assoc_mgr_fini()
+extern int assoc_mgr_fini(void)
 {
 	if(local_association_list) 
 		list_destroy(local_association_list);
@@ -207,7 +246,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 		if(!assoc->acct) {
 			acct_user_rec_t user;
 
-			if(!assoc->uid) {
+			if(assoc->uid == (uint32_t)NO_VAL) {
 				if(enforce) {
 					error("get_assoc_id: "
 					      "Not enough info to "
@@ -248,7 +287,8 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 			}
 			continue;
 		} else {
-			if(!assoc->uid && found_assoc->uid) {
+			if(assoc->uid == (uint32_t)NO_VAL
+			   && found_assoc->uid != (uint32_t)NO_VAL) {
 				debug3("we are looking for a "
 				       "nonuser association");
 				continue;
@@ -260,8 +300,9 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 			
 			if(found_assoc->acct 
 			   && strcasecmp(assoc->acct, found_assoc->acct)) {
-				   debug3("not the right account");
-				   continue;
+				debug3("not the right account %s != %s",
+				       assoc->acct, found_assoc->acct);
+				continue;
 			}
 
 			/* only check for on the slurmdbd */
@@ -402,7 +443,7 @@ extern int assoc_mgr_is_user_acct_coord(void *db_conn,
 	}
 	list_iterator_destroy(itr);
 		
-	if(!found_user) {
+	if(!found_user || !found_user->coord_accts) {
 		slurm_mutex_unlock(&local_user_lock);
 		return 0;
 	}
@@ -536,12 +577,15 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				//rc = SLURM_ERROR;
 				break;
 			}
+			_set_assoc_parent_and_user(object);
 			list_append(local_association_list, object);
 		case ACCT_REMOVE_ASSOC:
 			if(!rec) {
 				//rc = SLURM_ERROR;
 				break;
 			}
+			if (remove_assoc_notify)
+				remove_assoc_notify(rec);
 			list_delete_item(itr);
 			break;
 		default:
@@ -581,8 +625,10 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 {
 	acct_user_rec_t * rec = NULL;
 	acct_user_rec_t * object = NULL;
+		
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
+	struct passwd *passwd_ptr = NULL;
 
 	if(!local_user_list)
 		return SLURM_SUCCESS;
@@ -622,6 +668,12 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 				//rc = SLURM_ERROR;
 				break;
 			}
+			passwd_ptr = getpwnam(object->name);
+			if(passwd_ptr) 
+				object->uid = passwd_ptr->pw_uid;
+			else
+				object->uid = (uint32_t)NO_VAL;
+
 			list_append(local_user_list, object);
 		case ACCT_REMOVE_USER:
 			if(!rec) {
@@ -630,6 +682,23 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 			}
 			list_delete_item(itr);
 			break;
+		case ACCT_ADD_COORD:
+		case ACCT_REMOVE_COORD:
+			if(!rec) {
+				//rc = SLURM_ERROR;
+				break;
+			}
+			/* We always get a complete list here */
+			if(!object->coord_accts) {
+				if(rec->coord_accts)
+					list_flush(rec->coord_accts);
+			} else {
+				if(rec->coord_accts)
+					list_destroy(rec->coord_accts);
+				rec->coord_accts = object->coord_accts;
+				object->coord_accts = NULL;
+			}
+			break;
 		default:
 			break;
 		}
@@ -673,3 +742,21 @@ extern int assoc_mgr_validate_assoc_id(void *db_conn,
 	return SLURM_ERROR;
 }
 
+extern void assoc_mgr_clear_used_info(void)
+{
+	ListIterator itr = NULL;
+	acct_association_rec_t * found_assoc = NULL;
+
+	if (!local_association_list)
+		return;
+
+	slurm_mutex_lock(&local_association_lock);
+	itr = list_iterator_create(local_association_list);
+	while((found_assoc = list_next(itr))) {
+		found_assoc->used_jobs  = 0;
+		found_assoc->used_share = 0;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&local_association_lock);
+}
+
diff --git a/src/common/assoc_mgr.h b/src/common/assoc_mgr.h
index d0d1826c53e498cf67d716528449cb8d921fcf9e..512b753d424ee4910c61d2f9bd668b5d1dd833b3 100644
--- a/src/common/assoc_mgr.h
+++ b/src/common/assoc_mgr.h
@@ -49,6 +49,11 @@
 #include <slurm/slurm.h>
 #include <slurm/slurm_errno.h>
 
+typedef struct {
+	int enforce;
+	void (*remove_assoc_notify) (acct_association_rec_t *rec);
+} assoc_init_args_t;
+
 /* 
  * get info from the storage 
  * IN/OUT:  user - acct_user_rec_t with the name set of the user.
@@ -92,8 +97,8 @@ extern acct_admin_level_t assoc_mgr_get_admin_level(void *db_conn,
 extern int assoc_mgr_is_user_acct_coord(void *db_conn, uint32_t uid,
 					char *acct);
 
-extern int assoc_mgr_init(void *db_conn, int enforce);
-extern int assoc_mgr_fini();
+extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args);
+extern int assoc_mgr_fini(void);
 
 /* 
  * update associations in local cache 
@@ -120,4 +125,10 @@ extern int assoc_mgr_validate_assoc_id(void *db_conn,
 				       uint32_t assoc_id,
 				       int enforce);
 
+/*
+ * clear the used_* fields from every association,
+ *	used on reconfiguration
+ */
+extern void assoc_mgr_clear_used_info(void);
+
 #endif /* _SLURM_ASSOC_MGR_H */
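
A minimal caller sketch of the revised assoc_mgr_init() interface defined above; the callback
body, the db_conn handle and the enforce flag are placeholders:

	#include <string.h>

	#include "src/common/assoc_mgr.h"

	static void _notify_assoc_removed(acct_association_rec_t *rec)
	{
		/* e.g. re-evaluate or cancel jobs tied to this association */
	}

	static void _example_init(void *db_conn, int enforce)
	{
		assoc_init_args_t args;

		memset(&args, 0, sizeof(assoc_init_args_t));
		args.enforce = enforce;
		args.remove_assoc_notify = _notify_assoc_removed;

		assoc_mgr_init(db_conn, &args);
		/* ... normal operation ... */
		assoc_mgr_clear_used_info();	/* e.g. on reconfigure */
		assoc_mgr_fini();
	}
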
diff --git a/src/common/checkpoint.c b/src/common/checkpoint.c
index 54e7874b06ad9a3b23bbf0dcf696f5282cfe4773..e3aa5193854de893caba6f21312ee1fe919b775d 100644
--- a/src/common/checkpoint.c
+++ b/src/common/checkpoint.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  checkpoint.c - implementation-independent checkpoint functions
- *  $Id: checkpoint.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: checkpoint.c 14208 2008-06-06 19:15:24Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -127,6 +127,8 @@ _slurm_checkpoint_context_destroy( slurm_checkpoint_context_t c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			 return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->checkpoint_type );
@@ -156,6 +158,16 @@ _slurm_checkpoint_get_ops( slurm_checkpoint_context_t c )
 	};
         int n_syms = sizeof( syms ) / sizeof( char * );
 
+	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->checkpoint_type, n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s, "
+	      "looking at all files",
+	      c->checkpoint_type);
+	
         /* Get the plugin list, if needed. */
         if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
diff --git a/src/common/env.c b/src/common/env.c
index f7e5b5e5148d4556348aa70be47417ad97ee2a09..0d416a5a6297789ffa7efb8092ee17c9976703c1 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -1,8 +1,8 @@
 /*****************************************************************************\
  *  src/common/env.c - add an environment variable to environment vector
- *  $Id: env.c 14025 2008-05-09 16:37:03Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>, Danny Auble <da@llnl.gov>.
  *  LLNL-CODE-402394.
@@ -168,15 +168,16 @@ int
 setenvfs(const char *fmt, ...)
 {
 	va_list ap;
-	char buf[ENV_BUFSIZE];
-	char *bufcpy;
+	char *buf, *bufcpy;
 	int rc;
 
+	buf = xmalloc(ENV_BUFSIZE);
 	va_start(ap, fmt);
-	vsnprintf(buf, sizeof(buf), fmt, ap);
+	vsnprintf(buf, ENV_BUFSIZE, fmt, ap);
 	va_end(ap);
 	
 	bufcpy = xstrdup(buf);
+	xfree(buf);
 	rc = putenv(bufcpy);
 	return rc;
 }
@@ -184,17 +185,18 @@ setenvfs(const char *fmt, ...)
 int 
 setenvf(char ***envp, const char *name, const char *fmt, ...)
 {
-	char buf[ENV_BUFSIZE];
 	char **ep = NULL;
 	char *str = NULL;
 	va_list ap;
 	int rc;
-	char *bufcpy;
+	char *buf, *bufcpy;
 
+	buf = xmalloc(ENV_BUFSIZE);
 	va_start(ap, fmt);
-	vsnprintf (buf, sizeof(buf), fmt, ap);
+	vsnprintf (buf, ENV_BUFSIZE, fmt, ap);
 	va_end(ap);
 	bufcpy = xstrdup(buf);
+	xfree(buf);
 	
 	xstrfmtcat (str, "%s=%s", name, bufcpy);
 	xfree(bufcpy);
@@ -735,7 +737,7 @@ static char *_uint16_array_to_str(int array_len, const uint16_t *array)
  * This function returns the string representation of the compressed
  * array.  Free with xfree().
  */
-static char *_uint32_compressed_to_str(uint32_t array_len,
+extern char *uint32_compressed_to_str(uint32_t array_len,
 				       const uint32_t *array,
 				       const uint32_t *array_reps)
 {
@@ -789,7 +791,7 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc)
 	env_array_overwrite_fmt(dest, "SLURM_JOB_NODELIST", "%s",
 				alloc->node_list);
 
-	tmp = _uint32_compressed_to_str((uint32_t)alloc->num_cpu_groups,
+	tmp = uint32_compressed_to_str((uint32_t)alloc->num_cpu_groups,
 					alloc->cpus_per_node,
 					alloc->cpu_count_reps);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp);
@@ -847,7 +849,7 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	env_array_overwrite_fmt(dest, "SLURM_JOB_ID", "%u", batch->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_NUM_NODES", "%u", num_nodes);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_NODELIST", "%s", batch->nodes);
-	tmp = _uint32_compressed_to_str((uint32_t)batch->num_cpu_groups,
+	tmp = uint32_compressed_to_str((uint32_t)batch->num_cpu_groups,
 					batch->cpus_per_node,
 					batch->cpu_count_reps);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp);
@@ -978,30 +980,28 @@ char **env_array_create(void)
 int env_array_append_fmt(char ***array_ptr, const char *name,
 			 const char *value_fmt, ...)
 {
-	char buf[ENV_BUFSIZE];
+	char *buf;
 	char **ep = NULL;
 	char *str = NULL;
 	va_list ap;
 
-	buf[0] = '\0';
-	if (array_ptr == NULL) {
+	if (array_ptr == NULL)
 		return 0;
-	}
 
-	if (*array_ptr == NULL) {
+	if (*array_ptr == NULL)
 		*array_ptr = env_array_create();
-	}
 
-	va_start(ap, value_fmt);
-	vsnprintf (buf, sizeof(buf), value_fmt, ap);
-	va_end(ap);
-	
 	ep = _find_name_in_env(*array_ptr, name);
-	if (*ep != NULL) {
+	if (*ep != NULL)
 		return 0;
-	}
+
+	buf = xmalloc(ENV_BUFSIZE);
+	va_start(ap, value_fmt);
+	vsnprintf (buf, ENV_BUFSIZE, value_fmt, ap);
+	va_end(ap);
 
 	xstrfmtcat (str, "%s=%s", name, buf);
+	xfree(buf);
 	ep = _extend_env(array_ptr);
 	*ep = str;
 	
@@ -1021,18 +1021,15 @@ int env_array_append(char ***array_ptr, const char *name,
 	char **ep = NULL;
 	char *str = NULL;
 
-	if (array_ptr == NULL) {
+	if (array_ptr == NULL)
 		return 0;
-	}
 
-	if (*array_ptr == NULL) {
+	if (*array_ptr == NULL)
 		*array_ptr = env_array_create();
-	}
 
 	ep = _find_name_in_env(*array_ptr, name);
-	if (*ep != NULL) {
+	if (*ep != NULL)
 		return 0;
-	}
 
 	xstrfmtcat (str, "%s=%s", name, value);
 	ep = _extend_env(array_ptr);
@@ -1054,25 +1051,24 @@ int env_array_append(char ***array_ptr, const char *name,
 int env_array_overwrite_fmt(char ***array_ptr, const char *name,
 			    const char *value_fmt, ...)
 {
-	char buf[ENV_BUFSIZE];
+	char *buf;
 	char **ep = NULL;
 	char *str = NULL;
 	va_list ap;
 
-	buf[0] = '\0';
-	if (array_ptr == NULL) {
+	if (array_ptr == NULL)
 		return 0;
-	}
 
-	if (*array_ptr == NULL) {
+	if (*array_ptr == NULL)
 		*array_ptr = env_array_create();
-	}
 
+	buf = xmalloc(ENV_BUFSIZE);
 	va_start(ap, value_fmt);
-	vsnprintf (buf, sizeof(buf), value_fmt, ap);
+	vsnprintf (buf, ENV_BUFSIZE, value_fmt, ap);
 	va_end(ap);
 	
 	xstrfmtcat (str, "%s=%s", name, buf);
+	xfree(buf);
 	ep = _find_name_in_env(*array_ptr, name);
 	if (*ep != NULL) {
 		xfree (*ep);
@@ -1188,16 +1184,17 @@ static int _env_array_entry_splitter(const char *entry,
  */
 static int _env_array_putenv(const char *string)
 {
-	char name[256];
-	char value[ENV_BUFSIZE];
+	int rc = 0;
+	char name[256], *value;
 
-	if (!_env_array_entry_splitter(string, name, sizeof(name),
-				       value, sizeof(value)))
-		return 0;
-	if (setenv(name, value, 1) == -1)
-		return 0;
-	
-	return 1;
+	value = xmalloc(ENV_BUFSIZE);
+	if ((_env_array_entry_splitter(string, name, sizeof(name),
+				       value, ENV_BUFSIZE)) &&
+	    (setenv(name, value, 1) != -1))
+		rc = 1;
+
+	xfree(value);
+	return rc;
 }
 
 /*
@@ -1224,17 +1221,18 @@ void env_array_set_environment(char **env_array)
 void env_array_merge(char ***dest_array, const char **src_array)
 {
 	char **ptr;
-	char name[256];
-	char value[ENV_BUFSIZE];
+	char name[256], *value;
 
 	if (src_array == NULL)
 		return;
 
+	value = xmalloc(ENV_BUFSIZE);
 	for (ptr = (char **)src_array; *ptr != NULL; ptr++) {
 		if (_env_array_entry_splitter(*ptr, name, sizeof(name),
-					      value, sizeof(value)))
+					      value, ENV_BUFSIZE))
 			env_array_overwrite(dest_array, name, value);
 	}
+	xfree(value);
 }
 
 /*
@@ -1258,10 +1256,10 @@ static void _strip_cr_nl(char *line)
  * Load user environment from a cache file located in
  * <state_save_location>/env_username
  */
-char **_load_env_cache(const char *username)
+static char **_load_env_cache(const char *username)
 {
 	char *state_save_loc, fname[MAXPATHLEN];
-	char line[ENV_BUFSIZE], name[256], value[ENV_BUFSIZE];
+	char *line, name[256], *value;
 	char **env = NULL;
 	FILE *fp;
 	int i;
@@ -1271,26 +1269,31 @@ char **_load_env_cache(const char *username)
 		     username);
 	xfree(state_save_loc);
 	if (i < 0) {
-		fatal("Environment cache filename overflow");
+		error("Environment cache filename overflow");
 		return NULL;
 	}
 	if (!(fp = fopen(fname, "r"))) {
-		fatal("Could not open user environment cache at %s: %m",
+		error("Could not open user environment cache at %s: %m",
 			fname);
 		return NULL;
 	}
 
-	info("Getting cached environment variables at %s", fname);
+	verbose("Getting cached environment variables at %s", fname);
 	env = env_array_create();
+	line  = xmalloc(ENV_BUFSIZE);
+	value = xmalloc(ENV_BUFSIZE);
 	while (1) {
-		if (!fgets(line, sizeof(line), fp))
+		if (!fgets(line, ENV_BUFSIZE, fp))
 			break;
 		_strip_cr_nl(line);
 		if (_env_array_entry_splitter(line, name, sizeof(name), 
-					      value, sizeof(value)) &&
+					      value, ENV_BUFSIZE) &&
 		    (!_discard_env(name, value)))
 			env_array_overwrite(&env, name, value);
 	}
+	xfree(line);
+	xfree(value);
+
 	fclose(fp);
 	return env;
 }
@@ -1313,19 +1316,27 @@ char **_load_env_cache(const char *username)
  */
 char **env_array_user_default(const char *username, int timeout, int mode)
 {
-	char *line = NULL, *last = NULL, name[128], value[ENV_BUFSIZE];
-	char buffer[ENV_BUFSIZE];
+	char *line = NULL, *last = NULL, name[128], *value, *buffer;
 	char **env = NULL;
 	char *starttoken = "XXXXSLURMSTARTPARSINGHEREXXXX";
 	char *stoptoken  = "XXXXSLURMSTOPPARSINGHEREXXXXX";
 	char cmdstr[256], *env_loc = NULL;
 	int fildes[2], found, fval, len, rc, timeleft;
-	int buf_read, buf_rem;
+	int buf_read, buf_rem, config_timeout;
 	pid_t child;
 	struct timeval begin, now;
 	struct pollfd ufds;
 	struct stat buf;
 
+	if (geteuid() != (uid_t)0) {
+		fatal("WARNING: you must be root to use --get-user-env");
+		return NULL;
+	}
+
+	config_timeout = slurm_get_env_timeout();
+	if (config_timeout == 0)	/* just read directly from cache */
+		 return _load_env_cache(username);
+
 	if (stat("/bin/su", &buf))
 		fatal("Could not locate command: /bin/su");
 	if (stat("/bin/echo", &buf))
@@ -1336,11 +1347,10 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 		env_loc = "/usr/bin/env";
 	else
 		fatal("Could not location command: env");
-
-	if (geteuid() != (uid_t)0) {
-		fatal("WARNING: you must be root to use --get-user-env");
-		return NULL;
-	}
+	snprintf(cmdstr, sizeof(cmdstr),
+		 "/bin/echo; /bin/echo; /bin/echo; "
+		 "/bin/echo %s; %s; /bin/echo %s",
+		 starttoken, env_loc, stoptoken);
 
 	if (pipe(fildes) < 0) {
 		fatal("pipe: %m");
@@ -1353,15 +1363,12 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 		return NULL;
 	}
 	if (child == 0) {
+		setpgid(0, 0);
 		close(0);
 		open("/dev/null", O_RDONLY);
 		dup2(fildes[1], 1);
 		close(2);
 		open("/dev/null", O_WRONLY);
-		snprintf(cmdstr, sizeof(cmdstr),
-			 "/bin/echo; /bin/echo; /bin/echo; "
-			 "/bin/echo %s; %s; /bin/echo %s",
-			 starttoken, env_loc, stoptoken);
 		if      (mode == 1)
 			execl("/bin/su", "su", username, "-c", cmdstr, NULL);
 		else if (mode == 2)
@@ -1377,19 +1384,21 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 	}
 
 	close(fildes[1]);
-	if ((fval = fcntl(fildes[0], F_GETFL, 0)) >= 0)
-		fcntl(fildes[0], F_SETFL, fval | O_NONBLOCK);
+	if ((fval = fcntl(fildes[0], F_GETFL, 0)) < 0)
+		error("fcntl(F_GETFL) failed: %m");
+	else if (fcntl(fildes[0], F_SETFL, fval | O_NONBLOCK) < 0)
+		error("fcntl(F_SETFL) failed: %m");
 
 	gettimeofday(&begin, NULL);
 	ufds.fd = fildes[0];
 	ufds.events = POLLIN;
 
 	/* Read all of the output from /bin/su into buffer */
-	if ((timeout == 0) && ((timeout = slurm_get_env_timeout()) == 0))
-		timeleft = DEFAULT_GET_ENV_TIMEOUT;
+	if (timeout == 0)
+		timeout = config_timeout;	/* != 0 test above */
 	found = 0;
 	buf_read = 0;
-	bzero(buffer, sizeof(buffer));
+	buffer = xmalloc(ENV_BUFSIZE);
 	while (1) {
 		gettimeofday(&now, NULL);
 		timeleft = timeout * 1000;
@@ -1397,11 +1406,13 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 		timeleft -= (now.tv_usec - begin.tv_usec) / 1000;
 		if (timeleft <= 0) {
 			verbose("timeout waiting for /bin/su to complete");
+			kill(-child, 9);
 			break;
 		}
 		if ((rc = poll(&ufds, 1, timeleft)) <= 0) {
 			if (rc == 0) {
 				verbose("timeout waiting for /bin/su to complete");
+				kill(-child, 9);
 				break;
 			}
 			if ((errno == EINTR) || (errno == EAGAIN))
@@ -1419,7 +1430,7 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 			}
 			break;
 		}
-		buf_rem = sizeof(buffer) - buf_read;
+		buf_rem = ENV_BUFSIZE - buf_read;
 		if (buf_rem == 0) {
 			error("buffer overflow loading env vars");
 			break;
@@ -1438,6 +1449,7 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 	close(fildes[0]);
 	if (!found) {
 		error("Failed to load current user environment variables");
+		xfree(buffer);
 		return _load_env_cache(username);
 	}
 
@@ -1454,6 +1466,7 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 	}
 	if (!found) {
 		error("Failed to get current user environment variables");
+		xfree(buffer);
 		return _load_env_cache(username);
 	}
 
@@ -1462,17 +1475,20 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 	found = 0;
 	env = env_array_create();
 	line = strtok_r(NULL, "\n", &last);
+	value = xmalloc(ENV_BUFSIZE);
 	while (!found && line) {
 		if (!strncmp(line, stoptoken, len)) {
 			found = 1;
 			break;
 		}
 		if (_env_array_entry_splitter(line, name, sizeof(name), 
-					      value, sizeof(value)) &&
+					      value, ENV_BUFSIZE) &&
 		    (!_discard_env(name, value)))
 			env_array_overwrite(&env, name, value);
 		line = strtok_r(NULL, "\n", &last);
 	}
+	xfree(value);
+	xfree(buffer);
 	if (!found) {
 		error("Failed to get all user environment variables");
 		env_array_free(env);
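
A minimal sketch of the caller's view after these changes ("auser" is a placeholder account
name; the process must run as root, a timeout of 0 means "use GetEnvTimeout from slurm.conf",
and GetEnvTimeout=0 in turn means "read the cache file directly"):

	#include "src/common/env.h"

	char **user_env = env_array_user_default("auser", 0, 1); /* mode 1: su <user> -c */
	if (user_env) {
		env_array_set_environment(user_env);	/* or merge into a job's env */
		env_array_free(user_env);
	}
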
diff --git a/src/common/env.h b/src/common/env.h
index f59adaf17ddecab542c076840d4615a409768a80..d47b90cf78557c2a7567758131c520a6fc79450a 100644
--- a/src/common/env.h
+++ b/src/common/env.h
@@ -260,4 +260,20 @@ void env_array_set_environment(char **env_array);
  */
 char **env_array_user_default(const char *username, int timeout, int mode);
 
+/*
+ * The cpus-per-node representation in SLURM (and perhaps tasks-per-node
+ * in the future) is stored in a compressed format comprised of two
+ * equal-length arrays of uint32_t, and an integer holding the array length.
+ * In one array an element represents a count (number of cpus, number of tasks,
+ * etc.), and the corresponding element in the other array contains the
+ * number of times the count is repeated sequentially in the uncompressed
+ * something-per-node array.
+ *
+ * This function returns the string representation of the compressed
+ * array.  Free with xfree().
+ */
+char *uint32_compressed_to_str(uint32_t array_len,
+			       const uint32_t *array,
+			       const uint32_t *array_reps);
+
 #endif
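
A minimal sketch of the newly exported helper, based on the comment above; the exact output
format is assumed to match the SLURM_JOB_CPUS_PER_NODE convention:

	/* 3 nodes with 4 CPUs each, then 1 node with 2 CPUs */
	uint32_t cpus[] = {4, 2};
	uint32_t reps[] = {3, 1};
	char *str = uint32_compressed_to_str(2, cpus, reps);
	/* presumably yields something like "4(x3),2"; release with xfree() as documented */
	xfree(str);
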
diff --git a/src/common/jobacct_common.c b/src/common/jobacct_common.c
index 316123bfa186f973d50a7903473371213adfa465..847e171f547d230d0b6f032d36865a5b39442355 100644
--- a/src/common/jobacct_common.c
+++ b/src/common/jobacct_common.c
@@ -48,6 +48,7 @@ uint32_t cont_id = (uint32_t)NO_VAL;
 uint32_t acct_job_id = 0;
 uint32_t job_mem_limit = 0;
 bool pgid_plugin = false;
+uint32_t mult = 1000;
 
 static void _pack_jobacct_id(jobacct_id_t *jobacct_id, Buf buffer)
 {
@@ -67,7 +68,7 @@ unpack_error:
 static void _pack_sacct(sacct_t *sacct, Buf buffer)
 {
 	int i=0;
-	int mult = 1000000;
+	uint32_t temp;
 
 	if(!sacct) {
 		for(i=0; i<8; i++)
@@ -79,14 +80,20 @@ static void _pack_sacct(sacct_t *sacct, Buf buffer)
 		}
 		return;
 	} 
-	pack32((uint32_t)sacct->max_vsize, buffer);
-	pack32((uint32_t)(sacct->ave_vsize*mult), buffer);
-	pack32((uint32_t)sacct->max_rss, buffer);
-	pack32((uint32_t)(sacct->ave_rss*mult), buffer);
-	pack32((uint32_t)sacct->max_pages, buffer);
-	pack32((uint32_t)(sacct->ave_pages*mult), buffer);
-	pack32((uint32_t)(sacct->min_cpu*mult), buffer);
-	pack32((uint32_t)(sacct->ave_cpu*mult), buffer);
+
+	pack32(sacct->max_vsize, buffer);
+	temp = sacct->ave_vsize * mult;
+	pack32(temp, buffer);
+	pack32(sacct->max_rss, buffer);
+	temp = (uint32_t)sacct->ave_rss * mult;
+	pack32(temp, buffer);
+	pack32(sacct->max_pages, buffer);
+	temp = (uint32_t)sacct->ave_pages * mult;
+	pack32(temp, buffer);
+	temp = (uint32_t)sacct->min_cpu * mult;
+	pack32(temp, buffer);
+	temp = (uint32_t)sacct->ave_cpu * mult;
+	pack32(temp, buffer);
 
 	_pack_jobacct_id(&sacct->max_vsize_id, buffer);
 	_pack_jobacct_id(&sacct->max_rss_id, buffer);
@@ -97,21 +104,24 @@ static void _pack_sacct(sacct_t *sacct, Buf buffer)
 /* you need to xfree this */
 static int _unpack_sacct(sacct_t *sacct, Buf buffer)
 {
-	int mult = 1000000;
+	/* this is here to handle the floats, since packing a float
+	 * directly through a typecast returns incorrect information
+	 */
+	uint32_t temp;
 
 	safe_unpack32(&sacct->max_vsize, buffer);
-	safe_unpack32((uint32_t *)&sacct->ave_vsize, buffer);
-	sacct->ave_vsize /= mult;
+	safe_unpack32(&temp, buffer);
+	sacct->ave_vsize = temp / mult;
 	safe_unpack32(&sacct->max_rss, buffer);
-	safe_unpack32((uint32_t *)&sacct->ave_rss, buffer);
-	sacct->ave_rss /= mult;
+	safe_unpack32(&temp, buffer);
+	sacct->ave_rss = temp / mult;
 	safe_unpack32(&sacct->max_pages, buffer);
-	safe_unpack32((uint32_t *)&sacct->ave_pages, buffer);
-	sacct->ave_pages /= mult;
-	safe_unpack32((uint32_t *)&sacct->min_cpu, buffer);
-	sacct->min_cpu /= mult;
-	safe_unpack32((uint32_t *)&sacct->ave_cpu, buffer);
-	sacct->ave_cpu /= mult;
+	safe_unpack32(&temp, buffer);
+	sacct->ave_pages = temp / mult;
+	safe_unpack32(&temp, buffer);
+	sacct->min_cpu = temp / mult;
+	safe_unpack32(&temp, buffer);
+	sacct->ave_cpu = temp / mult;
 	if(_unpack_jobacct_id(&sacct->max_vsize_id, buffer) != SLURM_SUCCESS)
 		goto unpack_error;
 	if(_unpack_jobacct_id(&sacct->max_rss_id, buffer) != SLURM_SUCCESS)
@@ -135,6 +145,7 @@ extern jobacct_job_rec_t *create_jobacct_job_rec()
 	job->state = JOB_PENDING;
 	job->steps = list_create(destroy_jobacct_step_rec);
 	job->requid = -1;
+	job->lft = (uint32_t)NO_VAL;
 
       	return job;
 }
@@ -212,6 +223,7 @@ extern void pack_jobacct_job_rec(void *object, Buf buffer)
 	pack32(job->gid, buffer);
 	pack32(job->jobid, buffer);
 	packstr(job->jobname, buffer);
+	pack32(job->lft, buffer);
 	packstr(job->partition, buffer);
 	packstr(job->nodes, buffer);
 	pack32(job->priority, buffer);
@@ -240,7 +252,7 @@ extern void pack_jobacct_job_rec(void *object, Buf buffer)
 	pack32(job->tot_cpu_usec, buffer);
 	pack16(job->track_steps, buffer);
 	pack32(job->uid, buffer);
-	//packstr(job->user, buffer);
+	packstr(job->user, buffer);
 	pack32(job->user_cpu_sec, buffer);
 	pack32(job->user_cpu_usec, buffer);
 }
@@ -267,6 +279,7 @@ extern int unpack_jobacct_job_rec(void **job, Buf buffer)
 	safe_unpack32(&job_ptr->gid, buffer);
 	safe_unpack32(&job_ptr->jobid, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->jobname, &uint32_tmp, buffer);
+	safe_unpack32(&job_ptr->lft, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->partition, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->nodes, &uint32_tmp, buffer);
 	safe_unpack32((uint32_t *)&job_ptr->priority, buffer);
@@ -294,23 +307,14 @@ extern int unpack_jobacct_job_rec(void **job, Buf buffer)
 	safe_unpack32(&job_ptr->tot_cpu_usec, buffer);
 	safe_unpack16(&job_ptr->track_steps, buffer);
 	safe_unpack32(&job_ptr->uid, buffer);
-	//safe_unpackstr_xmalloc(&job_ptr->user, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job_ptr->user, &uint32_tmp, buffer);
 	safe_unpack32(&job_ptr->user_cpu_sec, buffer);
 	safe_unpack32(&job_ptr->user_cpu_usec, buffer);
 	
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(job_ptr->account);
-	xfree(job_ptr->blockid);
-	xfree(job_ptr->cluster);
-	xfree(job_ptr->jobname);
-	xfree(job_ptr->partition);
-	xfree(job_ptr->nodes);
-	if(job_ptr->steps)
-		list_destroy(job_ptr->steps);
-	xfree(job_ptr->user);
-	xfree(job_ptr);
+	destroy_jobacct_job_rec(job_ptr);
 	*job = NULL;
 	return SLURM_ERROR;
 }
@@ -368,9 +372,7 @@ extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(step_ptr->nodes);
-	xfree(step_ptr->stepname);
-	xfree(step_ptr);
+	destroy_jobacct_step_rec(step_ptr);
 	*step = NULL;
 	return SLURM_ERROR;
 } 
@@ -401,9 +403,7 @@ extern int unpack_jobacct_selected_step(jobacct_selected_step_t **step,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(step_ptr->job);
-	xfree(step_ptr->step);
-	xfree(step_ptr);
+	destroy_jobacct_selected_step(step_ptr);
 	*step = NULL;
 	return SLURM_ERROR;
 }
@@ -527,7 +527,7 @@ rwfail:
 }
 
 extern int jobacct_common_getinfo(struct jobacctinfo *jobacct, 
-			  enum jobacct_data_type type, void *data)
+				  enum jobacct_data_type type, void *data)
 {
 	int rc = SLURM_SUCCESS;
 	int *fd = (int *)data;
@@ -744,6 +744,7 @@ extern int jobacct_common_unpack(struct jobacctinfo **jobacct, Buf buffer)
 	if(_unpack_jobacct_id(&(*jobacct)->min_cpu_id, buffer)
 	   != SLURM_SUCCESS)
 		goto unpack_error;
+
 	return SLURM_SUCCESS;
 
 unpack_error:
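
A minimal sketch of the scaling scheme the new file-scope mult factor implements (the values
and the Buf are placeholders); averages survive the uint32_t transport with 1/1000 precision:

	/* pack side: scale before truncating so the fraction survives */
	double ave_cpu = 12.345;
	uint32_t temp = (uint32_t)(ave_cpu * mult);	/* mult == 1000, so 12345 */
	pack32(temp, buffer);

	/* unpack side: divide back down to recover roughly 12.345 */
	safe_unpack32(&temp, buffer);
	double recovered = (double)temp / mult;
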
diff --git a/src/common/jobacct_common.h b/src/common/jobacct_common.h
index e9e7089d53bd630da49c83c2b572e15c439e6f28..e785e9c94f96a74f8d8239b19aba2e83a2abc54f 100644
--- a/src/common/jobacct_common.h
+++ b/src/common/jobacct_common.h
@@ -125,6 +125,7 @@ typedef struct {
 	uint32_t gid;
 	uint32_t jobid;
 	char	*jobname;
+	uint32_t lft;
 	char	*partition;
 	char	*nodes;
 	int32_t priority;
diff --git a/src/common/mpi.c b/src/common/mpi.c
index ace6fab970aa28395bbb459a8788296f6c6a617f..3e71c8dc23efa604412d14ba601f6b5fec5ae1f5 100644
--- a/src/common/mpi.c
+++ b/src/common/mpi.c
@@ -118,6 +118,8 @@ _slurm_mpi_context_destroy( slurm_mpi_context_t c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree(c->mpi_type);
@@ -145,6 +147,16 @@ _slurm_mpi_get_ops( slurm_mpi_context_t c )
 	int n_syms = sizeof( syms ) / sizeof( char * );
 	char *plugin_dir = NULL;
 	
+	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->mpi_type, n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s, "
+	      "looking at all files",
+	      c->mpi_type);
+	
 	/* Get the plugin list, if needed. */
 	if ( c->plugin_list == NULL ) {
 		c->plugin_list = plugrack_create();
diff --git a/src/common/node_select.c b/src/common/node_select.c
index d247fbd4d70c1fc37c5cf7675e5c3301358f7ef7..71c67fccf630a0180e94cca5cabb3bd36b9e305f 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -9,7 +9,7 @@
  *  the plugin. This is because functions required by the plugin can not be 
  *  resolved on the front-end nodes, so we can't load the plugins there.
  *
- *  $Id: node_select.c 13697 2008-03-21 21:56:40Z da $
+ *  $Id: node_select.c 14208 2008-06-06 19:15:24Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -196,6 +196,16 @@ static slurm_select_ops_t * _select_get_ops(slurm_select_context_t *c)
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
+	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->select_type, n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s, "
+	      "looking at all files",
+	      c->select_type);
+	
 	/* Get plugin list. */
 	if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
@@ -266,6 +276,8 @@ static int _select_context_destroy( slurm_select_context_t *c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->select_type );
diff --git a/src/common/pack.c b/src/common/pack.c
index 53902756f17b2e44a7b0d21658ce656fda389346..d48f3c0413b64db56c42c52a97e35b77262e1ad4 100644
--- a/src/common/pack.c
+++ b/src/common/pack.c
@@ -47,7 +47,6 @@
 #include <string.h>
 #include <time.h>
 #include <inttypes.h>
-
 #include <slurm/slurm_errno.h>
 
 #include "src/common/pack.h"
@@ -65,6 +64,8 @@ strong_alias(init_buf,		slurm_init_buf);
 strong_alias(xfer_buf_data,	slurm_xfer_buf_data);
 strong_alias(pack_time,		slurm_pack_time);
 strong_alias(unpack_time,	slurm_unpack_time);
+strong_alias(pack64,		slurm_pack64);
+strong_alias(unpack64,		slurm_unpack64);
 strong_alias(pack32,		slurm_pack32);
 strong_alias(unpack32,		slurm_unpack32);
 strong_alias(pack16,		slurm_pack16);
@@ -174,6 +175,39 @@ int unpack_time(time_t * valp, Buf buffer)
 }
 
 
+/*
+ * Given a 64-bit integer in host byte order, convert to network byte order
+ * store in buffer, and adjust buffer counters.
+ */
+void pack64(uint64_t val, Buf buffer)
+{
+	uint64_t nl =  HTON_uint64(val);
+
+	if (remaining_buf(buffer) < sizeof(nl)) {
+		buffer->size += BUF_SIZE;
+		xrealloc(buffer->head, buffer->size);
+	}
+
+	memcpy(&buffer->head[buffer->processed], &nl, sizeof(nl));
+	buffer->processed += sizeof(nl);
+}
+
+/*
+ * Given a buffer containing a network byte order 64-bit integer,
+ * store a host integer at 'valp', and adjust buffer counters.
+ */
+int unpack64(uint64_t * valp, Buf buffer)
+{
+	uint64_t nl;
+	if (remaining_buf(buffer) < sizeof(nl))
+		return SLURM_ERROR;
+	
+	memcpy(&nl, &buffer->head[buffer->processed], sizeof(nl));
+	*valp = NTOH_uint64(nl);
+	buffer->processed += sizeof(nl);
+	return SLURM_SUCCESS;
+}
+
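+/*
+ * Round-trip sketch for the new 64-bit routines (illustrative only):
+ *
+ *    Buf buf = init_buf(BUF_SIZE);
+ *    uint64_t in = 1234567890123ULL, out = 0;
+ *
+ *    pack64(in, buf);
+ *    set_buf_offset(buf, 0);    (rewind before unpacking)
+ *    if (unpack64(&out, buf) != SLURM_SUCCESS)
+ *            error("unpack64 failed");
+ *    free_buf(buf);
+ */
+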
 /*
  * Given a 32-bit integer in host byte order, convert to network byte order
  * store in buffer, and adjust buffer counters.
diff --git a/src/common/pack.h b/src/common/pack.h
index 4b18355e9c46e703c06cb6d55d14b91a4374d40a..3e537b6e07519b8e896220c783889986bf303bd9 100644
--- a/src/common/pack.h
+++ b/src/common/pack.h
@@ -83,6 +83,9 @@ void	*xfer_buf_data(Buf my_buf);
 void	pack_time(time_t val, Buf buffer);
 int	unpack_time(time_t *valp, Buf buffer);
 
+void 	pack64(uint64_t val, Buf buffer);
+int	unpack64(uint64_t *valp, Buf buffer);
+
 void 	pack32(uint32_t val, Buf buffer);
 int	unpack32(uint32_t *valp, Buf buffer);
 
@@ -124,6 +127,20 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 		goto unpack_error;			\
 } while (0)
 
+#define safe_pack64(val,buf) do {			\
+	assert(sizeof(val) == sizeof(uint64_t)); 	\
+	assert(buf->magic == BUF_MAGIC);		\
+	pack64(val,buf);				\
+} while (0)
+
+#define safe_unpack64(valp,buf) do {			\
+	assert((valp) != NULL); 			\
+	assert(sizeof(*valp) == sizeof(uint64_t));      \
+	assert(buf->magic == BUF_MAGIC);		\
+        if (unpack64(valp,buf))				\
+		goto unpack_error;			\
+} while (0)
+
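+/*
+ * Usage sketch for the 64-bit helpers (illustrative; my_rec_t and its
+ * alloc_secs field are hypothetical):
+ *
+ *    int unpack_my_rec(my_rec_t *rec, Buf buffer)
+ *    {
+ *            safe_unpack64(&rec->alloc_secs, buffer);
+ *            return SLURM_SUCCESS;
+ *
+ *    unpack_error:
+ *            return SLURM_ERROR;
+ *    }
+ */
+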
 #define safe_pack32(val,buf) do {			\
 	assert(sizeof(val) == sizeof(uint32_t)); 	\
 	assert(buf->magic == BUF_MAGIC);		\
diff --git a/src/common/plugin.c b/src/common/plugin.c
index 5ca7f372c0de1e1b75c45ae7875a4a88f8ebd52c..772edb27508b2091e4725bbd94d6630393de7a28 100644
--- a/src/common/plugin.c
+++ b/src/common/plugin.c
@@ -1,7 +1,8 @@
 /*****************************************************************************\
  * plugin.h - plugin architecture implementation.
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>.
  *  LLNL-CODE-402394.
@@ -45,10 +46,27 @@
 #include <dlfcn.h>        /* don't know if there's an autoconf for this. */
 #include <string.h>
 
+#include "src/common/xmalloc.h"
 #include "src/common/log.h"
 #include "src/common/plugin.h"
+#include "src/common/xstring.h"
+#include "src/common/slurm_protocol_api.h"
 #include <slurm/slurm_errno.h>
 
+#  if HAVE_UNISTD_H
+#    include <unistd.h>
+#  endif /* HAVE_UNISTD_H */
+#  if HAVE_SYS_TYPES_H
+#    include <sys/types.h>
+#  endif
+#  if HAVE_SYS_STAT_H
+#    include <sys/stat.h>
+#  endif
+
+#  if HAVE_STDLIB_H
+#    include <stdlib.h>
+#  endif
+
 /* dlerror() on AIX sometimes fails, revert to strerror() as needed */
 static char *_dlerror(void)
 {
@@ -151,7 +169,67 @@ plugin_load_from_file( const char *fq_path )
         return plug;
 }
 
+plugin_handle_t
+plugin_load_and_link(const char *type_name, int n_syms,
+		    const char *names[], void *ptrs[])
+{
+        plugin_handle_t plug = PLUGIN_INVALID_HANDLE;
+	struct stat st;
+	char *head=NULL, *dir_array=NULL, *so_name = NULL,
+		*file_name=NULL;
+	int i=0;
+	
+	if (!type_name)
+		return plug;
+
+	so_name = xstrdup_printf("%s.so", type_name);
+
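+	/* Plugin type names use '/' (e.g. "select/linear"); the shared
+	 * object file name replaces it with '_' ("select_linear.so"). */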
+	while(so_name[i]) {
+		if(so_name[i] == '/')
+			so_name[i] = '_';
+		i++;
+	}
+	if(!(dir_array = slurm_get_plugin_dir())) {
+		error("plugin_load_and_link: No plugin dir given");
+		xfree(so_name);
+		return plug;
+	}
+	
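+	/* PluginDir may be a colon-separated list of directories;
+	 * try each one in turn. */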
+	head = dir_array;
+	for (i=0; ; i++) {
+		bool got_colon = 0;
+		if (dir_array[i] == ':') {
+			dir_array[i] = '\0';
+			got_colon = 1;
+		} else if(dir_array[i] != '\0') 
+			continue;
+		
+		file_name = xstrdup_printf("%s/%s", head, so_name);
+		debug3("Trying to load plugin %s", file_name);
+		if ((stat(file_name, &st) < 0) || (!S_ISREG(st.st_mode))) {
+			debug4("No Good.");
+			xfree(file_name);
+		} else {
+			plug = plugin_load_from_file(file_name);
+			xfree(file_name);
+			if (plugin_get_syms(plug, n_syms, names, ptrs) >= 
+			    n_syms) {
+				debug3("Success.");
+				break;
+			} else 
+				plug = PLUGIN_INVALID_HANDLE;
+		}
 
+		if (got_colon) {
+			head = dir_array + i + 1;
+		} else 
+			break;
+	}
+	
+	xfree(dir_array);
+	xfree(so_name);
+	return plug;
+}
+
 /*
  * Must test plugin validity before doing dlopen() and dlsym()
  * operations because some implementations of these functions
diff --git a/src/common/plugin.h b/src/common/plugin.h
index 9db07b24d549765b30a141bee7c7304dbc6dd4a2..e634320f171f1209b35a453cee6d5424fc38ea52 100644
--- a/src/common/plugin.h
+++ b/src/common/plugin.h
@@ -117,6 +117,24 @@ int plugin_peek( const char *fq_path,
  */
 plugin_handle_t plugin_load_from_file( const char *fq_path );
 
+/*
+ * Load a plugin by type name and link its exported symbols.
+ *
+ * type_name - plugin type as entered into slurm.conf.
+ *
+ * n_syms - the number of symbols in names[].
+ * names[] - an argv-like array of symbol names to resolve.
+ * ptrs[] - an array of pointers into which the addresses of the respective
+ * 	symbols should be placed.  ptrs[i] will receive the address of
+ *	names[i].
+ *
+ * Returns a handle if successful, or PLUGIN_INVALID_HANDLE if not.
+ *
+ * The plugin's initialization code will be executed prior
+ * to this function's return.
+ */
+plugin_handle_t plugin_load_and_link(const char *type_name, int n_syms,
+				     const char *names[], void *ptrs[]);
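+
+/*
+ * Illustrative use of plugin_load_and_link() (the plugin type string
+ * and error handling shown are examples only):
+ *
+ *    static const char *syms[] = { "plugin_name", "plugin_version" };
+ *    void *ptrs[2];
+ *    plugin_handle_t h = plugin_load_and_link("auth/munge", 2, syms, ptrs);
+ *    if (h == PLUGIN_INVALID_HANDLE)
+ *            error("cannot load auth plugin");
+ */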
 
 /*
  * Unload a plugin from memory.
diff --git a/src/common/plugrack.c b/src/common/plugrack.c
index cc4a948b38990e94ec892d1f32753c09ed831b03..eb595df026e444fb6eb33956de9d02246bbf3770 100644
--- a/src/common/plugrack.c
+++ b/src/common/plugrack.c
@@ -329,9 +329,7 @@ plugrack_read_dir( plugrack_t rack, const char *dir )
 	if ( ( ! rack ) || (! dir ) )
 		return SLURM_ERROR;
 
-	dir_array = xmalloc( strlen( dir ) + 1 );
-	xassert( dir_array );
-	strcpy( dir_array, dir );
+	dir_array = xstrdup(dir);
 	head = dir_array;
 	for (i=0; ; i++) {
 		if (dir_array[i] == '\0') {
diff --git a/src/common/plugstack.c b/src/common/plugstack.c
index 16b3a137d64c1aebc0952423b5f7b0581ed21b5d..09e9899df2f7bffc85b29fc34d65b3f046d08b05 100644
--- a/src/common/plugstack.c
+++ b/src/common/plugstack.c
@@ -377,8 +377,13 @@ static int _spank_stack_create(const char *path, List * listp)
 
 	verbose("spank: opening plugin stack %s\n", path);
 
-	if (!(fp = safeopen(path, "r", SAFEOPEN_NOCREATE)))
+	if (!(fp = safeopen(path, "r", SAFEOPEN_NOCREATE))) {
+		if (errno == ENOENT)
+			debug("spank: Failed to open %s: %m\n", path);
+		else
+			error("spank: Failed to open %s: %m\n", path);
 		return -1;
+	}
 
 	line = 1;
 	while (fgets(buf, sizeof(buf), fp)) {
diff --git a/src/sacctmgr/print.c b/src/common/print_fields.c
similarity index 70%
rename from src/sacctmgr/print.c
rename to src/common/print_fields.c
index e852a4dfd3aee3b6d979e02e11f972974ee4aca1..9beeffd5606f95896c35e6feedbb58159ae80fbc 100644
--- a/src/sacctmgr/print.c
+++ b/src/common/print_fields.c
@@ -35,10 +35,11 @@
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
-#include "src/sacctmgr/print.h"
+#include "src/common/print_fields.h"
 #include "src/common/parse_time.h"
-int parsable_print = 0;
-int have_header = 1;
+
+int print_fields_parsable_print = 0;
+int print_fields_have_header = 1;
 
 extern void destroy_print_field(void *object)
 {
@@ -50,12 +51,12 @@ extern void destroy_print_field(void *object)
 	}
 }
 
-extern void print_header(List print_fields_list)
+extern void print_fields_header(List print_fields_list)
 {
 	ListIterator itr = NULL;
 	print_field_t *object = NULL;
 
-	if(!print_fields_list || !have_header) 
+	if(!print_fields_list || !print_fields_have_header) 
 		return;
 
 	itr = list_iterator_create(print_fields_list);
@@ -64,7 +65,7 @@ extern void print_header(List print_fields_list)
 	}
 	list_iterator_reset(itr);
 	printf("\n");
-	if(parsable_print)
+	if(print_fields_parsable_print)
 		return;
 	while((object = list_next(itr))) {
 		(object->print_routine)(SLURM_PRINT_UNDERSCORE, object, 0);
@@ -73,7 +74,7 @@ extern void print_header(List print_fields_list)
 	printf("\n");	
 }
 
-extern void print_date(void)
+extern void print_fields_date(void)
 {
 	time_t now;
 
@@ -82,37 +83,37 @@ extern void print_date(void)
 
 }
 
-extern void print_str(type_t type, print_field_t *field, char *value)
+extern void print_fields_str(type_t type, print_field_t *field, char *value)
 {
 	char *print_this = value;
 
 	switch(type) {
 	case SLURM_PRINT_HEADLINE:
-		if(parsable_print)
+		if(print_fields_parsable_print)
 			printf("%s|", field->name);
 		else
 			printf("%-*.*s ", field->len, field->len, field->name);
 		break;
 	case SLURM_PRINT_UNDERSCORE:
-		if(!parsable_print)
+		if(!print_fields_parsable_print)
 			printf("%-*.*s ", field->len, field->len, 
 			       "---------------------------------------");
 		break;
 	case SLURM_PRINT_VALUE:
 		if(!print_this) {
-			if(parsable_print)
+			if(print_fields_parsable_print)
 				print_this = "";
 			else
 				print_this = " ";
 		}
 
-		if(parsable_print)
+		if(print_fields_parsable_print)
 			printf("%s|", print_this);
 		else
 			printf("%-*.*s ", field->len, field->len, print_this);
 		break;
 	default:
-		if(parsable_print)
+		if(print_fields_parsable_print)
 			printf("%s|", "n/a");
 		else
 			printf("%-*s ", field->len, "n/a");
@@ -120,36 +121,37 @@ extern void print_str(type_t type, print_field_t *field, char *value)
 	}
 }
 
-extern void print_uint(type_t type, print_field_t *field, uint32_t value)
+extern void print_fields_uint32(type_t type, print_field_t *field,
+				uint32_t value)
 {
 	switch(type) {
 	case SLURM_PRINT_HEADLINE:
-		if(parsable_print)
+		if(print_fields_parsable_print)
 			printf("%s|", field->name);
 		else
 			printf("%-*.*s ", field->len, field->len, field->name);
 		break;
 	case SLURM_PRINT_UNDERSCORE:
-		if(!parsable_print)
+		if(!print_fields_parsable_print)
 			printf("%-*.*s ", field->len, field->len, 
 			       "---------------------------------------");
 		break;
 	case SLURM_PRINT_VALUE:
 		/* (value == unset)  || (value == cleared) */
 		if((value == NO_VAL) || (value == INFINITE)) {
-			if(parsable_print)
+			if(print_fields_parsable_print)
 				printf("|");	
 			else				
 				printf("%-*s ", field->len, " ");
 		} else {
-			if(parsable_print)
+			if(print_fields_parsable_print)
 				printf("%u|", value);	
 			else
 				printf("%*u ", field->len, value);
 		}
 		break;
 	default:
-		if(parsable_print)
+		if(print_fields_parsable_print)
 			printf("%s|", "n/a");
 		else
 			printf("%-*.*s ", field->len, field->len, "n/a");
@@ -157,24 +159,63 @@ extern void print_uint(type_t type, print_field_t *field, uint32_t value)
 	}
 }
 
-extern void print_time(type_t type, print_field_t *field, uint32_t value)
+extern void print_fields_uint64(type_t type, print_field_t *field,
+				uint64_t value)
 {
 	switch(type) {
 	case SLURM_PRINT_HEADLINE:
-		if(parsable_print)
+		if(print_fields_parsable_print)
 			printf("%s|", field->name);
 		else
 			printf("%-*.*s ", field->len, field->len, field->name);
 		break;
 	case SLURM_PRINT_UNDERSCORE:
-		if(!parsable_print)
+		if(!print_fields_parsable_print)
 			printf("%-*.*s ", field->len, field->len, 
 			       "---------------------------------------");
 		break;
 	case SLURM_PRINT_VALUE:
 		/* (value == unset)  || (value == cleared) */
 		if((value == NO_VAL) || (value == INFINITE)) {
-			if(parsable_print)
+			if(print_fields_parsable_print)
+				printf("|");	
+			else				
+				printf("%-*s ", field->len, " ");
+		} else {
+			if(print_fields_parsable_print)
+				printf("%llu|", (long long unsigned) value);	
+			else
+				printf("%*llu ", field->len, 
+				       (long long unsigned) value);
+		}
+		break;
+	default:
+		if(print_fields_parsable_print)
+			printf("%s|", "n/a");
+		else
+			printf("%-*.*s ", field->len, field->len, "n/a");
+		break;
+	}
+}
+
+extern void print_fields_time(type_t type, print_field_t *field, uint32_t value)
+{
+	switch(type) {
+	case SLURM_PRINT_HEADLINE:
+		if(print_fields_parsable_print)
+			printf("%s|", field->name);
+		else
+			printf("%-*.*s ", field->len, field->len, field->name);
+		break;
+	case SLURM_PRINT_UNDERSCORE:
+		if(!print_fields_parsable_print)
+			printf("%-*.*s ", field->len, field->len, 
+			       "---------------------------------------");
+		break;
+	case SLURM_PRINT_VALUE:
+		/* (value == unset)  || (value == cleared) */
+		if((value == NO_VAL) || (value == INFINITE)) {
+			if(print_fields_parsable_print)
 				printf("|");	
 			else
 				printf("%-*s ", field->len, " ");
@@ -182,7 +223,7 @@ extern void print_time(type_t type, print_field_t *field, uint32_t value)
 			char time_buf[32];
 			mins2time_str((time_t) value, 
 				      time_buf, sizeof(time_buf));
-			if(parsable_print)
+			if(print_fields_parsable_print)
 				printf("%s|", time_buf);
 			else
 				printf("%*s ", field->len, time_buf);
@@ -193,3 +234,4 @@ extern void print_time(type_t type, print_field_t *field, uint32_t value)
 		break;
 	}
 }
+
diff --git a/src/sacctmgr/print.h b/src/common/print_fields.h
similarity index 79%
rename from src/sacctmgr/print.h
rename to src/common/print_fields.h
index a4f0bf760468f013ef32c9f14aeac77a8d603ab9..0a4e2c04657bb7c281ca9f95c1e7314283a2dcc9 100644
--- a/src/sacctmgr/print.h
+++ b/src/common/print_fields.h
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  print.h - definitions for all printing functions.
+ *  print_fields.h - definitions for all printing functions.
  *****************************************************************************
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Copyright (C) 2002-2007 The Regents of the University of California.
@@ -35,8 +35,8 @@
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
-#ifndef __SACCTMGR_PRINT_H__
-#define __SACCTMGR_PRINT_H__
+#ifndef __PRINT_FIELDS_H__
+#define __PRINT_FIELDS_H__
 
 #if HAVE_CONFIG_H
 #  include "config.h"
@@ -63,9 +63,9 @@
 
 #include <slurm/slurm.h>
 
+#include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_accounting_storage.h"
-#include "src/common/jobacct_common.h"
+#include "src/common/list.h"
 
 typedef enum {
 	SLURM_PRINT_HEADLINE,
@@ -80,14 +80,19 @@ typedef struct {
 	uint16_t type; /* defined in the local function */
 } print_field_t;
 
-extern int parsable_print;
-extern int have_header;
+extern int print_fields_parsable_print;
+extern int print_fields_have_header;
 
 extern void destroy_print_field(void *object);
-extern void print_header(List print_fields_list);
-extern void print_date(void);
-extern void print_str(type_t type, print_field_t *field, char *value);
-extern void print_uint(type_t type, print_field_t *field, uint32_t value);
-extern void print_time(type_t type, print_field_t *field, uint32_t value);
+extern void print_fields_header(List print_fields_list);
+extern void print_fields_date(void);
+extern void print_fields_str(type_t type, print_field_t *field, char *value);
+extern void print_fields_uint32(type_t type, print_field_t *field,
+				uint32_t value);
+extern void print_fields_uint64(type_t type, print_field_t *field,
+				uint64_t value);
+extern void print_fields_time(type_t type, print_field_t *field,
+			      uint32_t value);
 
+#define print_fields_uint print_fields_uint32
 #endif
diff --git a/src/common/proc_args.c b/src/common/proc_args.c
index e46fbf4b8e0975c0018eddc20a43b7cf70786c8d..ed6d9de0307a5575d1d7d7e53a0db6351cdc74bd 100644
--- a/src/common/proc_args.c
+++ b/src/common/proc_args.c
@@ -135,7 +135,7 @@ int verify_conn_type(const char *arg)
 		return SELECT_NAV;
 
 	error("invalid --conn-type argument %s ignored.", arg);
-	return -1;
+	return NO_VAL;
 }
 
 /*
diff --git a/src/common/read_config.c b/src/common/read_config.c
index 131fb27d360cdd78c820f8fa5bf915cd6ec442f9..90598831f0302859a92425605b260e1ba173eadb 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
  *  LLNL-CODE-402394.
@@ -43,6 +44,7 @@
 #include <assert.h>
 #include <ctype.h>
 #include <errno.h>
+#include <netdb.h>
 #include <pwd.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -67,6 +69,7 @@
 #include "src/common/parse_config.h"
 #include "src/common/parse_time.h"
 #include "src/common/slurm_selecttype_info.h"
+#include "src/common/util-net.h"
 
 /* Instantiation of the "extern slurm_ctl_conf_t slurmcltd_conf"
  * found in slurmctld.h */
@@ -145,6 +148,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"DefaultStorageUser", S_P_STRING},
 	{"DefMemPerTask", S_P_UINT32},
 	{"DisableRootJobs", S_P_BOOLEAN},
+	{"EnforcePartLimits", S_P_BOOLEAN},
 	{"Epilog", S_P_STRING},
 	{"EpilogMsgTime", S_P_UINT32},
 	{"FastSchedule", S_P_UINT16},
@@ -927,6 +931,58 @@ extern char *slurm_conf_get_nodename(const char *node_hostname)
 	return NULL;
 }
 
+/*
+ * slurm_conf_get_aliased_nodename - Return the NodeName for the
+ * complete hostname string returned by gethostname if there is
+ * such a match, otherwise iterate through any aliases returned
+ * by get_host_by_name
+ */
+extern char *slurm_conf_get_aliased_nodename(void)
+{
+	char hostname_full[1024];
+	int error_code;
+	char *nodename;
+
+	error_code = gethostname(hostname_full, sizeof(hostname_full));
+	/* This is unlikely to fail: by the time this function is called,
+	 * gethostname_short(), which also invokes gethostname(), has
+	 * normally already succeeded.  Just return NULL if something
+	 * unexpected happens at this point.
+	 */
+	if (error_code)
+		return NULL;
+
+	nodename = slurm_conf_get_nodename(hostname_full);
+	/* if the full hostname did not match a nodename */
+	if (nodename == NULL) {
+		/* use get_host_by_name; buffer sizes, semantics, etc.
+		 * copied from slurm_protocol_socket_implementation.c
+		 */
+		struct hostent * he = NULL;
+		char * h_buf[4096];
+		int h_err;
+
+		he = get_host_by_name(hostname_full, (void *)&h_buf,
+				      sizeof(h_buf), &h_err);
+		if (he != NULL) {
+			unsigned int i = 0;
+			/* check the "official" host name first */
+			nodename = slurm_conf_get_nodename(he->h_name);
+			while ((nodename == NULL) &&
+			       (he->h_aliases[i] != NULL)) {
+				/* the "official" name still didn't match --
+				 * iterate through the aliases */
+				nodename =
+				     slurm_conf_get_nodename(he->h_aliases[i]);
+				i++;
+			}
+		}
+	}
+
+	return nodename;
+}
+
 /*
  * slurm_conf_get_port - Return the port for a given NodeName
  */
@@ -1153,6 +1209,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->crypto_type);
 	ctl_conf_ptr->def_mem_per_task          = 0;
 	ctl_conf_ptr->disable_root_jobs         = 0;
+	ctl_conf_ptr->enforce_part_limits       = 0;
 	xfree (ctl_conf_ptr->epilog);
 	ctl_conf_ptr->epilog_msg_time		= (uint32_t) NO_VAL;
 	ctl_conf_ptr->fast_schedule		= (uint16_t) NO_VAL;
@@ -1535,6 +1592,10 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 			     "DisableRootJobs", hashtbl))
 		conf->disable_root_jobs = DEFAULT_DISABLE_ROOT_JOBS;
 
+	if (!s_p_get_boolean((bool *) &conf->enforce_part_limits, 
+			     "EnforcePartLimits", hashtbl))
+		conf->enforce_part_limits = DEFAULT_ENFORCE_PART_LIMITS;
+
 	s_p_get_string(&conf->epilog, "Epilog", hashtbl);
 
 	if (!s_p_get_uint32(&conf->epilog_msg_time, "EpilogMsgTime", hashtbl))
diff --git a/src/common/read_config.h b/src/common/read_config.h
index 8badcb691d4a0c3381bfc5a3d129b4f658b33a2d..cddc90068df4f3ff9030a6e939b2398279da16d7 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -2,7 +2,9 @@
  *  read_config.h - definitions for reading the overall slurm configuration 
  *  file
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Morris Jette <jette1@llnl.gov>.
  *  LLNL-CODE-402394.
@@ -64,6 +66,7 @@ extern char *default_plugstack;
 #define DEFAULT_JOB_ACCT_GATHER_FREQ  30
 #define ACCOUNTING_STORAGE_TYPE_NONE "accounting_storage/none"
 #define DEFAULT_DISABLE_ROOT_JOBS   0
+#define DEFAULT_ENFORCE_PART_LIMITS 0
 #define DEFAULT_JOB_COMP_TYPE       "jobcomp/none"
 #define DEFAULT_JOB_COMP_LOC        "/var/log/slurm_jobcomp.log"
 #define DEFAULT_KILL_TREE           0
@@ -266,6 +269,17 @@ extern char *slurm_conf_get_hostname(const char *node_name);
  */
 extern char *slurm_conf_get_nodename(const char *node_hostname);
 
+/*
+ * slurm_conf_get_aliased_nodename - Return the NodeName matching an alias
+ * of the local hostname
+ *
+ * Returned string was allocated with xmalloc(), and must be freed by
+ * the caller using xfree().
+ *
+ * NOTE: Caller must NOT be holding slurm_conf_lock().
+ */
+extern char *slurm_conf_get_aliased_nodename(void);
+
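+/*
+ * Illustrative lookup sequence (hypothetical caller; short_hostname is
+ * assumed to hold the local short host name):
+ *
+ *    char *node_name = slurm_conf_get_nodename(short_hostname);
+ *    if (node_name == NULL)
+ *            node_name = slurm_conf_get_aliased_nodename();
+ *    if (node_name == NULL)
+ *            fatal("Unable to determine this node's NodeName");
+ *    ...
+ *    xfree(node_name);
+ */
+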
 /*
  * slurm_conf_get_port - Return the port for a given NodeName
  *
diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c
index 31d94d6277282d9f514827ac56e4b77fe4ce61c5..f7f545d37766e50fd5c94350f3f9a14e423d1ddd 100644
--- a/src/common/slurm_accounting_storage.c
+++ b/src/common/slurm_accounting_storage.c
@@ -64,7 +64,7 @@ typedef struct slurm_acct_storage_ops {
 	int  (*add_users)          (void *db_conn, uint32_t uid,
 				    List user_list);
 	int  (*add_coord)          (void *db_conn, uint32_t uid,
-				    char *acct,
+				    List acct_list,
 				    acct_user_cond_t *user_q);
 	int  (*add_accts)          (void *db_conn, uint32_t uid,
 				    List acct_list);
@@ -87,7 +87,7 @@ typedef struct slurm_acct_storage_ops {
 	List (*remove_users)       (void *db_conn, uint32_t uid,
 				    acct_user_cond_t *user_q);
 	List (*remove_coord)       (void *db_conn, uint32_t uid,
-				    char *acct,
+				    List acct_list,
 				    acct_user_cond_t *user_q);
 	List (*remove_accts)       (void *db_conn, uint32_t uid,
 				    acct_account_cond_t *acct_q);
@@ -139,6 +139,8 @@ typedef struct slurm_acct_storage_ops {
 				    List selected_steps,
 				    List selected_parts,
 				    void *params);	
+	List (*get_jobs_cond)      (void *db_conn,
+				    acct_job_cond_t *job_cond);	
 	void (*job_archive)        (void *db_conn,
 				    List selected_parts, void *params);	
 	int (*update_shares_used)  (void *db_conn,
@@ -214,12 +216,23 @@ static slurm_acct_storage_ops_t * _acct_storage_get_ops(
 		"jobacct_storage_p_step_complete",
 		"jobacct_storage_p_suspend",
 		"jobacct_storage_p_get_jobs",
+		"jobacct_storage_p_get_jobs_cond",
 		"jobacct_storage_p_archive",
 		"acct_storage_p_update_shares_used",
 		"acct_storage_p_flush_jobs_on_cluster"
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
+	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->acct_storage_type, n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s, "
+	      "looking at all files",
+	      c->acct_storage_type);
+
 	/* Get plugin list. */
 	if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
@@ -292,6 +305,8 @@ static int _acct_storage_context_destroy(slurm_acct_storage_context_t *c)
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->acct_storage_type );
@@ -458,6 +473,30 @@ extern void destroy_acct_association_cond(void *object)
 	}
 }
 
+extern void destroy_acct_job_cond(void *object)
+{
+	acct_job_cond_t *job_cond = 
+		(acct_job_cond_t *)object;
+
+	if(job_cond) {
+		if(job_cond->acct_list)
+			list_destroy(job_cond->acct_list);
+		if(job_cond->associd_list)
+			list_destroy(job_cond->associd_list);
+		if(job_cond->cluster_list)
+			list_destroy(job_cond->cluster_list);
+		if(job_cond->groupid_list)
+			list_destroy(job_cond->groupid_list);
+		if(job_cond->partition_list)
+			list_destroy(job_cond->partition_list);
+		if(job_cond->step_list)
+			list_destroy(job_cond->step_list);
+		if(job_cond->user_list)
+			list_destroy(job_cond->user_list);
+		xfree(job_cond);
+	}
+}
+
 extern void destroy_acct_update_object(void *object)
 {
 	acct_update_object_t *acct_update = 
@@ -546,7 +585,9 @@ extern int unpack_acct_user_rec(void **object, Buf buffer)
 		object_ptr->assoc_list =
 			list_create(destroy_acct_association_rec);
 		for(i=0; i<count; i++) {
-			unpack_acct_association_rec((void *)&assoc, buffer);
+			if(unpack_acct_association_rec((void *)&assoc, buffer)
+			   == SLURM_ERROR)
+				goto unpack_error;
 			list_append(object_ptr->assoc_list, assoc);
 		}
 	}
@@ -554,7 +595,9 @@ extern int unpack_acct_user_rec(void **object, Buf buffer)
 	if(count) {
 		object_ptr->coord_accts = list_create(destroy_acct_coord_rec);
 		for(i=0; i<count; i++) {
-			unpack_acct_coord_rec((void *)&coord, buffer);
+			if(unpack_acct_coord_rec((void *)&coord, buffer)
+			   == SLURM_ERROR)
+				goto unpack_error;
 			list_append(object_ptr->coord_accts, coord);
 		}
 	}
@@ -659,7 +702,9 @@ extern int unpack_acct_account_rec(void **object, Buf buffer)
 		object_ptr->assoc_list =
 			list_create(destroy_acct_association_rec);
 		for(i=0; i<count; i++) {
-			unpack_acct_association_rec((void *)&assoc, buffer);
+			if(unpack_acct_association_rec((void *)&assoc, buffer)
+			   == SLURM_ERROR)
+				goto unpack_error;
 			list_append(object_ptr->assoc_list, assoc);
 		}
 	}
@@ -706,6 +751,7 @@ extern int unpack_acct_coord_rec(void **object, Buf buffer)
 	*object = object_ptr;
 	safe_unpackstr_xmalloc(&object_ptr->acct_name, &uint32_tmp, buffer);
 	safe_unpack16(&object_ptr->sub_acct, buffer);
+	return SLURM_SUCCESS;
 
 unpack_error:
 	destroy_acct_coord_rec(object_ptr);
@@ -718,21 +764,23 @@ extern void pack_cluster_accounting_rec(void *in, Buf buffer)
 	cluster_accounting_rec_t *object = (cluster_accounting_rec_t *)in;
 	
 	if(!object) {
+		pack64(0, buffer);
 		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
+		pack64(0, buffer);
+		pack64(0, buffer);
+		pack64(0, buffer);
 		pack_time(0, buffer);
-		pack32(0, buffer);
+		pack64(0, buffer);
 		return;
 	}
 
- 	pack32(object->alloc_secs, buffer);
+ 	pack64(object->alloc_secs, buffer);
 	pack32(object->cpu_count, buffer);
-	pack32(object->down_secs, buffer);
-	pack32(object->idle_secs, buffer);
+	pack64(object->down_secs, buffer);
+	pack64(object->idle_secs, buffer);
+	pack64(object->over_secs, buffer);
 	pack_time(object->period_start, buffer);
-	pack32(object->resv_secs, buffer);
+	pack64(object->resv_secs, buffer);
 }
 
 extern int unpack_cluster_accounting_rec(void **object, Buf buffer)
@@ -741,12 +789,13 @@ extern int unpack_cluster_accounting_rec(void **object, Buf buffer)
 		xmalloc(sizeof(cluster_accounting_rec_t));
 	
 	*object = object_ptr;
-	safe_unpack32(&object_ptr->alloc_secs, buffer);
+	safe_unpack64(&object_ptr->alloc_secs, buffer);
 	safe_unpack32(&object_ptr->cpu_count, buffer);
-	safe_unpack32(&object_ptr->down_secs, buffer);
-	safe_unpack32(&object_ptr->idle_secs, buffer);
+	safe_unpack64(&object_ptr->down_secs, buffer);
+	safe_unpack64(&object_ptr->idle_secs, buffer);
+	safe_unpack64(&object_ptr->over_secs, buffer);
 	safe_unpack_time(&object_ptr->period_start, buffer);
-	safe_unpack32(&object_ptr->resv_secs, buffer);
+	safe_unpack64(&object_ptr->resv_secs, buffer);
 	
 	return SLURM_SUCCESS;
 
@@ -843,13 +892,15 @@ extern void pack_acct_accounting_rec(void *in, Buf buffer)
 	acct_accounting_rec_t *object = (acct_accounting_rec_t *)in;
 	
 	if(!object) {
-		pack_time(0, buffer);
+		pack64(0, buffer);
 		pack32(0, buffer);
+		pack_time(0, buffer);
 		return;
 	}
 
+	pack64(object->alloc_secs, buffer);
+	pack32(object->assoc_id, buffer);
 	pack_time(object->period_start, buffer);
-	pack32(object->alloc_secs, buffer);
 }
 
 extern int unpack_acct_accounting_rec(void **object, Buf buffer)
@@ -858,8 +909,9 @@ extern int unpack_acct_accounting_rec(void **object, Buf buffer)
 		xmalloc(sizeof(acct_accounting_rec_t));
 	
 	*object = object_ptr;
+	safe_unpack64(&object_ptr->alloc_secs, buffer);
+	safe_unpack32(&object_ptr->assoc_id, buffer);
 	safe_unpack_time(&object_ptr->period_start, buffer);
-	safe_unpack32(&object_ptr->alloc_secs, buffer);
 
 	return SLURM_SUCCESS;
 
@@ -886,11 +938,13 @@ extern void pack_acct_association_rec(void *in, Buf buffer)
 		pack32(0, buffer);
 		pack32(0, buffer);
 		pack32(0, buffer);
+		pack32(0, buffer);
 		packnull(buffer);
 		pack32(0, buffer);
 		packnull(buffer);
 		pack32(0, buffer);
 		pack32(0, buffer);
+		pack32(0, buffer);
 		packnull(buffer);
 		return;
 	}
@@ -913,6 +967,7 @@ extern void pack_acct_association_rec(void *in, Buf buffer)
 	packstr(object->cluster, buffer);
 	pack32(object->fairshare, buffer);
 	pack32(object->id, buffer);
+	pack32(object->lft, buffer);
 	pack32(object->max_cpu_secs_per_job, buffer);
 	pack32(object->max_jobs, buffer);
 	pack32(object->max_nodes_per_job, buffer);
@@ -920,6 +975,7 @@ extern void pack_acct_association_rec(void *in, Buf buffer)
 	packstr(object->parent_acct, buffer);
 	pack32(object->parent_id, buffer);
 	packstr(object->partition, buffer);
+	pack32(object->rgt, buffer);
 	pack32(object->uid, buffer);
 	pack32(object->used_share, buffer);
 	packstr(object->user, buffer);	
@@ -941,7 +997,9 @@ extern int unpack_acct_association_rec(void **object, Buf buffer)
 		object_ptr->accounting_list =
 			list_create(destroy_acct_accounting_rec);
 		for(i=0; i<count; i++) {
-			unpack_acct_accounting_rec((void **)&acct_info, buffer);
+			if(unpack_acct_accounting_rec((void **)&acct_info,
+						      buffer) == SLURM_ERROR)
+				goto unpack_error;
 			list_append(object_ptr->accounting_list, acct_info);
 		}
 	}
@@ -949,6 +1007,7 @@ extern int unpack_acct_association_rec(void **object, Buf buffer)
 	safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp, buffer);
 	safe_unpack32(&object_ptr->fairshare, buffer);
 	safe_unpack32(&object_ptr->id, buffer);
+	safe_unpack32(&object_ptr->lft, buffer);
 	safe_unpack32(&object_ptr->max_cpu_secs_per_job, buffer);
 	safe_unpack32(&object_ptr->max_jobs, buffer);
 	safe_unpack32(&object_ptr->max_nodes_per_job, buffer);
@@ -956,6 +1015,7 @@ extern int unpack_acct_association_rec(void **object, Buf buffer)
 	safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp, buffer);
 	safe_unpack32(&object_ptr->parent_id, buffer);
 	safe_unpackstr_xmalloc(&object_ptr->partition, &uint32_tmp, buffer);
+	safe_unpack32(&object_ptr->rgt, buffer);
 	safe_unpack32(&object_ptr->uid, buffer);
 	safe_unpack32(&object_ptr->used_share, buffer);
 	safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
@@ -983,6 +1043,8 @@ extern void pack_acct_user_cond(void *in, Buf buffer)
 		pack16(0, buffer);
 		pack32(0, buffer);
 		pack16(0, buffer);
+		pack16(0, buffer);
+		pack16(0, buffer);
 		return;
 	}
  
@@ -1019,6 +1081,8 @@ extern void pack_acct_user_cond(void *in, Buf buffer)
 		list_iterator_destroy(itr);
 	}
 	pack16((uint16_t)object->with_assocs, buffer);
+	pack16((uint16_t)object->with_coords, buffer);
+	pack16((uint16_t)object->with_deleted, buffer);
 
 }
 
@@ -1056,6 +1120,8 @@ extern int unpack_acct_user_cond(void **object, Buf buffer)
 		}
 	}
 	safe_unpack16((uint16_t *)&object_ptr->with_assocs, buffer);
+	safe_unpack16((uint16_t *)&object_ptr->with_coords, buffer);
+	safe_unpack16((uint16_t *)&object_ptr->with_deleted, buffer);
 
 	return SLURM_SUCCESS;
 
@@ -1079,6 +1145,7 @@ extern void pack_acct_account_cond(void *in, Buf buffer)
 		pack32(0, buffer);
 		pack16(0, buffer);
 		pack16(0, buffer);
+		pack16(0, buffer);
 		return;
 	}
  	if(object->acct_list)
@@ -1125,6 +1192,7 @@ extern void pack_acct_account_cond(void *in, Buf buffer)
 	}
 	pack16((uint16_t)object->qos, buffer);
 	pack16((uint16_t)object->with_assocs, buffer);
+	pack16((uint16_t)object->with_deleted, buffer);
 }
 
 extern int unpack_acct_account_cond(void **object, Buf buffer)
@@ -1167,6 +1235,7 @@ extern int unpack_acct_account_cond(void **object, Buf buffer)
 	}
 	safe_unpack16((uint16_t *)&object_ptr->qos, buffer);
 	safe_unpack16((uint16_t *)&object_ptr->with_assocs, buffer);
+	safe_unpack16((uint16_t *)&object_ptr->with_deleted, buffer);
 
 	return SLURM_SUCCESS;
 
@@ -1185,6 +1254,9 @@ extern void pack_acct_cluster_cond(void *in, Buf buffer)
 
 	if(!object) {
 		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack16(0, buffer);
 		pack16(0, buffer);
 		return;
 	}
@@ -1201,6 +1273,12 @@ extern void pack_acct_cluster_cond(void *in, Buf buffer)
 		}
 		list_iterator_destroy(itr);
 	}
+
+	pack32(object->usage_end, buffer);
+	pack32(object->usage_start, buffer);
+
+	pack16((uint16_t)object->with_usage, buffer);
+	pack16((uint16_t)object->with_deleted, buffer);
 }
 
 extern int unpack_acct_cluster_cond(void **object, Buf buffer)
@@ -1220,6 +1298,11 @@ extern int unpack_acct_cluster_cond(void **object, Buf buffer)
 			list_append(object_ptr->cluster_list, tmp_info);
 		}
 	}
+	safe_unpack32(&object_ptr->usage_end, buffer);
+	safe_unpack32(&object_ptr->usage_start, buffer);
+
+	safe_unpack16((uint16_t *)&object_ptr->with_usage, buffer);
+	safe_unpack16((uint16_t *)&object_ptr->with_deleted, buffer);
 
 	return SLURM_SUCCESS;
 
@@ -1249,6 +1332,10 @@ extern void pack_acct_association_cond(void *in, Buf buffer)
 		pack32(0, buffer);
 		packnull(buffer);
 		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack16(0, buffer);
+		pack16(0, buffer);
 		return;
 	}
 
@@ -1312,6 +1399,9 @@ extern void pack_acct_association_cond(void *in, Buf buffer)
 
 	packstr(object->parent_acct, buffer);
 
+	pack32(object->usage_end, buffer);
+	pack32(object->usage_start, buffer);
+
 	if(object->user_list)
 		count = list_count(object->user_list);
 	
@@ -1324,6 +1414,9 @@ extern void pack_acct_association_cond(void *in, Buf buffer)
 		list_iterator_destroy(itr);
 	}
 	count = 0;
+
+	pack16((uint16_t)object->with_usage, buffer);
+	pack16((uint16_t)object->with_deleted, buffer);
 }
 
 extern int unpack_acct_association_cond(void **object, Buf buffer)
@@ -1380,6 +1473,9 @@ extern int unpack_acct_association_cond(void **object, Buf buffer)
 
 	safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp, buffer);
 
+	safe_unpack32(&object_ptr->usage_end, buffer);
+	safe_unpack32(&object_ptr->usage_start, buffer);
+
 	safe_unpack32(&count, buffer);
 	if(count) {
 		object_ptr->user_list = list_create(slurm_destroy_char);
@@ -1389,6 +1485,8 @@ extern int unpack_acct_association_cond(void **object, Buf buffer)
 		}
 	}
 
+	safe_unpack16(&object_ptr->with_usage, buffer);
+	safe_unpack16((uint16_t *)&object_ptr->with_deleted, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1397,6 +1495,212 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+extern void pack_acct_job_cond(void *in, Buf buffer)
+{
+	char *tmp_info = NULL;
+	jobacct_selected_step_t *job = NULL;
+	uint32_t count = 0;
+
+	ListIterator itr = NULL;
+	acct_job_cond_t *object = (acct_job_cond_t *)in;
+
+	if(!object) {
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack16(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		return;
+	}
+
+	if(object->acct_list)
+		count = list_count(object->acct_list);
+	
+	pack32(count, buffer);
+	if(count) {
+		itr = list_iterator_create(object->acct_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = 0;
+
+	if(object->associd_list)
+		count = list_count(object->associd_list);
+	
+	pack32(count, buffer);
+	if(count) {
+		itr = list_iterator_create(object->associd_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = 0;
+
+	if(object->cluster_list)
+		count = list_count(object->cluster_list);
+	
+	pack32(count, buffer);
+	if(count) {
+		itr = list_iterator_create(object->cluster_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = 0;
+
+	pack16(object->completion, buffer);
+
+	if(object->groupid_list)
+		count = list_count(object->groupid_list);
+	
+	pack32(count, buffer);
+	if(count) {
+		itr = list_iterator_create(object->groupid_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = 0;
+	
+	if(object->partition_list)
+		count = list_count(object->partition_list);
+	
+	pack32(count, buffer);
+	if(count) {
+		itr = list_iterator_create(object->partition_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = 0;
+
+	if(object->step_list)
+		count = list_count(object->step_list);
+	
+	pack32(count, buffer);
+	if(count) {
+		itr = list_iterator_create(object->step_list);
+		while((job = list_next(itr))) {
+			pack_jobacct_selected_step(job, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = 0;
+
+	pack32(object->usage_end, buffer);
+	pack32(object->usage_start, buffer);
+
+	if(object->user_list)
+		count = list_count(object->user_list);
+	
+	pack32(count, buffer);
+	if(count) {
+		itr = list_iterator_create(object->user_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = 0;
+}
+
+extern int unpack_acct_job_cond(void **object, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	int i;
+	uint32_t count;
+	acct_job_cond_t *object_ptr = xmalloc(sizeof(acct_job_cond_t));
+	char *tmp_info = NULL;
+	jobacct_selected_step_t *job = NULL;
+
+	*object = object_ptr;
+	safe_unpack32(&count, buffer);
+	if(count) {
+		object_ptr->acct_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->acct_list, tmp_info);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if(count) {
+		object_ptr->associd_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->associd_list, tmp_info);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if(count) {
+		object_ptr->cluster_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->cluster_list, tmp_info);
+		}
+	}
+
+	safe_unpack16(&object_ptr->completion, buffer);
+
+	safe_unpack32(&count, buffer);
+	if(count) {
+		object_ptr->groupid_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->groupid_list, tmp_info);
+		}
+	}
+	
+	safe_unpack32(&count, buffer);
+	if(count) {
+		object_ptr->partition_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->partition_list, tmp_info);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if(count) {
+		object_ptr->step_list =
+			list_create(destroy_jobacct_selected_step);
+		for(i=0; i<count; i++) {
+			unpack_jobacct_selected_step(&job, buffer);
+			list_append(object_ptr->step_list, job);
+		}
+	}
+
+	safe_unpack32(&object_ptr->usage_end, buffer);
+	safe_unpack32(&object_ptr->usage_start, buffer);
+
+	safe_unpack32(&count, buffer);
+	if(count) {
+		object_ptr->user_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->user_list, tmp_info);
+		}
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_acct_job_cond(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
 extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer)
 {
 	uint32_t count = 0;
@@ -1409,6 +1713,8 @@ extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer)
 	case ACCT_MODIFY_USER:
 	case ACCT_ADD_USER:
 	case ACCT_REMOVE_USER:
+	case ACCT_ADD_COORD:
+	case ACCT_REMOVE_COORD:
 		my_function = pack_acct_user_rec;
 		break;
 	case ACCT_ADD_ASSOC:
@@ -1451,6 +1757,8 @@ extern int unpack_acct_update_object(acct_update_object_t **object, Buf buffer)
 	case ACCT_MODIFY_USER:
 	case ACCT_ADD_USER:
 	case ACCT_REMOVE_USER:
+	case ACCT_ADD_COORD:
+	case ACCT_REMOVE_COORD:
 		my_function = unpack_acct_user_rec;
 		my_destroy = destroy_acct_user_rec;
 		break;
@@ -1686,12 +1994,12 @@ extern int acct_storage_g_add_users(void *db_conn, uint32_t uid,
 }
 
 extern int acct_storage_g_add_coord(void *db_conn, uint32_t uid,
-				    char *acct, acct_user_cond_t *user_q)
+				    List acct_list, acct_user_cond_t *user_q)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
 	return (*(g_acct_storage_context->ops.add_coord))
-		(db_conn, uid, acct, user_q);
+		(db_conn, uid, acct_list, user_q);
 }
 
 extern int acct_storage_g_add_accounts(void *db_conn, uint32_t uid,
@@ -1771,12 +2079,13 @@ extern List acct_storage_g_remove_users(void *db_conn, uint32_t uid,
 }
 
 extern List acct_storage_g_remove_coord(void *db_conn, uint32_t uid,
-				       char *acct, acct_user_cond_t *user_q)
+					List acct_list,
+					acct_user_cond_t *user_q)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return NULL;
 	return (*(g_acct_storage_context->ops.remove_coord))
-		(db_conn, uid, acct, user_q);
+		(db_conn, uid, acct_list, user_q);
 }
 
 extern List acct_storage_g_remove_accounts(void *db_conn, uint32_t uid,
@@ -1984,6 +2293,20 @@ extern List jobacct_storage_g_get_jobs(void *db_conn,
 		(db_conn, selected_steps, selected_parts, params);
 }
 
+/* 
+ * get info from the storage 
+ * returns List of jobacct_job_rec_t *
+ * note: the returned List must be freed by the caller
+ */
+extern List jobacct_storage_g_get_jobs_cond(void *db_conn,
+					    acct_job_cond_t *job_cond)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return NULL;
+ 	return (*(g_acct_storage_context->ops.get_jobs_cond))
+		(db_conn, job_cond);
+}
+
 /* 
  * expire old info from the storage 
  */
@@ -1992,7 +2315,8 @@ extern void jobacct_storage_g_archive(void *db_conn,
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return;
- 	(*(g_acct_storage_context->ops.job_archive))(db_conn, selected_parts, params);
+ 	(*(g_acct_storage_context->ops.job_archive))(db_conn, selected_parts,
+						     params);
 	return;
 }
 
diff --git a/src/common/slurm_accounting_storage.h b/src/common/slurm_accounting_storage.h
index 710d0457a50cc10387de02ea407d8400a8cb55dc..d3c204d81c8b2fdcc7798383e7ad8af09ba2b9e7 100644
--- a/src/common/slurm_accounting_storage.h
+++ b/src/common/slurm_accounting_storage.h
@@ -65,10 +65,12 @@ typedef enum {
 	ACCT_UPDATE_NOTSET,
 	ACCT_ADD_USER,
 	ACCT_ADD_ASSOC,
+	ACCT_ADD_COORD,
 	ACCT_MODIFY_USER,
 	ACCT_MODIFY_ASSOC,
 	ACCT_REMOVE_USER,
-	ACCT_REMOVE_ASSOC
+	ACCT_REMOVE_ASSOC,
+	ACCT_REMOVE_COORD
 } acct_update_type_t;
 
 /* Association conditions used for queries of the database */
@@ -87,7 +89,11 @@ typedef struct {
 					     * can run a job (seconds) */
 	List partition_list;	/* list of char * */
 	char *parent_acct;	/* name of parent account */
+	uint32_t usage_end; 
+	uint32_t usage_start; 
 	List user_list;		/* list of char * */
+	uint16_t with_usage; 
+	uint16_t with_deleted; 
 } acct_association_cond_t;
 
 typedef struct {
@@ -97,6 +103,7 @@ typedef struct {
 	List organization_list; /* list of char * */
 	acct_qos_level_t qos;	
 	uint16_t with_assocs; 
+	uint16_t with_deleted; 
 } acct_account_cond_t;
 
 typedef struct {
@@ -109,7 +116,8 @@ typedef struct {
 } acct_account_rec_t;
 
 typedef struct {
-	uint32_t alloc_secs; /* number of cpu seconds allocated */
+	uint64_t alloc_secs; /* number of cpu seconds allocated */
+	uint32_t assoc_id;	/* association ID		*/
 	time_t period_start; 
 } acct_accounting_rec_t;
 
@@ -120,6 +128,9 @@ typedef struct acct_association_rec {
 	uint32_t fairshare;	/* fairshare number */
 	uint32_t id;		/* id identifing a combination of
 				 * user-account-cluster(-partition) */
+	uint32_t lft;		/* lft used for grouping sub
+				 * associations and jobs as a left
+				 * most container used with rgt */
 	uint32_t max_cpu_secs_per_job; /* max number of cpu seconds this 
 					   * association can have per job */
 	uint32_t max_jobs;	/* max number of jobs this association can run
@@ -134,6 +145,9 @@ typedef struct acct_association_rec {
 	uint32_t parent_id;	/* id of parent account */
 	char *partition;	/* optional partition in a cluster 
 				 * associated to association */
+	uint32_t rgt;		/* rgt used for grouping sub
+				 * associations and jobs as a right
+				 * most container used with lft */
 	uint32_t uid;		/* user ID */
 	uint32_t used_jobs;	/* count of active jobs */
 	uint32_t used_share;	/* measure of resource usage */
@@ -142,6 +156,10 @@ typedef struct acct_association_rec {
 
 typedef struct {
 	List cluster_list; /* list of char * */
+	uint32_t usage_end; 
+	uint32_t usage_start; 
+	uint16_t with_usage; 
+	uint16_t with_deleted; 
 } acct_cluster_cond_t;
 
 typedef struct {
@@ -149,12 +167,13 @@ typedef struct {
 	char *control_host;
 	uint32_t control_port;
 	uint32_t default_fairshare;	/* fairshare number */
-	uint32_t default_max_cpu_secs_per_job; /* max number of cpu seconds this 
-					* association can have per job */
-	uint32_t default_max_jobs;	/* max number of jobs this association can run
-				 * at one time */
+	uint32_t default_max_cpu_secs_per_job;/* max number of cpu seconds this 
+					       * association can have per job */
+	uint32_t default_max_jobs;/* max number of jobs this association can run
+				   * at one time */
 	uint32_t default_max_nodes_per_job; /* max number of nodes this
-				     * association can allocate per job */
+					     * association can
+					     * allocate per job */
 	uint32_t default_max_wall_duration_per_job; /* longest time this
 					     * association can run a job */
 	char *name;
@@ -166,6 +185,20 @@ typedef struct {
 	uint16_t sub_acct;
 } acct_coord_rec_t;
 
+typedef struct {
+	List acct_list;		/* list of char * */
+	List associd_list;	/* list of char */
+	List cluster_list;	/* list of char * */
+	uint16_t completion;	/* get job completion records instead
+				 * of accounting records */
+	List groupid_list;	/* list of char * */
+	List partition_list;	/* list of char * */
+	List step_list;         /* list of jobacct_selected_step_t */
+	uint32_t usage_end; 
+	uint32_t usage_start; 
+	List user_list;		/* list of char * */
+} acct_job_cond_t;
+
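+/*
+ * Illustrative query sketch (the user name and time window are examples;
+ * db_conn is assumed to be an open accounting storage connection):
+ *
+ *    acct_job_cond_t *job_cond = xmalloc(sizeof(acct_job_cond_t));
+ *    List jobs;
+ *
+ *    job_cond->user_list = list_create(slurm_destroy_char);
+ *    list_append(job_cond->user_list, xstrdup("joe"));
+ *    job_cond->usage_start = (uint32_t) (time(NULL) - 3600);
+ *    jobs = jobacct_storage_g_get_jobs_cond(db_conn, job_cond);
+ *    if (jobs)
+ *            list_destroy(jobs);
+ *    destroy_acct_job_cond(job_cond);
+ */
+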
 typedef struct {
 	acct_admin_level_t admin_level;
 	acct_association_cond_t *assoc_cond;
@@ -173,6 +206,8 @@ typedef struct {
 	acct_qos_level_t qos;	
 	List user_list; /* list of char * */
 	uint16_t with_assocs; 
+	uint16_t with_coords; 
+	uint16_t with_deleted; 
 } acct_user_cond_t;
 
 typedef struct {
@@ -196,12 +231,13 @@ typedef struct {
 } shares_used_object_t;
 
 typedef struct {
-	uint32_t alloc_secs; /* number of cpu seconds allocated */
+	uint64_t alloc_secs; /* number of cpu seconds allocated */
 	uint32_t cpu_count; /* number of cpus during time period */
-	uint32_t down_secs; /* number of cpu seconds down */
-	uint32_t idle_secs; /* number of cpu seconds idle */
+	uint64_t down_secs; /* number of cpu seconds down */
+	uint64_t idle_secs; /* number of cpu seconds idle */
+	uint64_t over_secs; /* number of cpu seconds overcommitted */
 	time_t period_start; /* when this record was started */
-	uint32_t resv_secs; /* number of cpu seconds reserved */	
+	uint64_t resv_secs; /* number of cpu seconds reserved */	
 } cluster_accounting_rec_t;
 
 extern void destroy_acct_user_rec(void *object);
@@ -216,10 +252,12 @@ extern void destroy_acct_user_cond(void *object);
 extern void destroy_acct_account_cond(void *object);
 extern void destroy_acct_cluster_cond(void *object);
 extern void destroy_acct_association_cond(void *object);
+extern void destroy_acct_job_cond(void *object);
 
 extern void destroy_acct_update_object(void *object);
 extern void destroy_update_shares_rec(void *object);
 
+
 /* pack functions */
 extern void pack_acct_user_rec(void *object, Buf buffer);
 extern int unpack_acct_user_rec(void **object, Buf buffer);
@@ -244,6 +282,8 @@ extern void pack_acct_cluster_cond(void *object, Buf buffer);
 extern int unpack_acct_cluster_cond(void **object, Buf buffer);
 extern void pack_acct_association_cond(void *object, Buf buffer);
 extern int unpack_acct_association_cond(void **object, Buf buffer);
+extern void pack_acct_job_cond(void *object, Buf buffer);
+extern int unpack_acct_job_cond(void **object, Buf buffer);
 
 extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer);
 extern int unpack_acct_update_object(acct_update_object_t **object, Buf buffer);
@@ -295,12 +335,12 @@ extern int acct_storage_g_add_users(void *db_conn, uint32_t uid,
 
 /* 
  * add users as account coordinators 
- * IN:  acct name of account
+ * IN: acct_list list of char *'s of account names
  * IN:  acct_user_cond_t *user_q
  * RET: SLURM_SUCCESS on success SLURM_ERROR else
  */
 extern int acct_storage_g_add_coord(void *db_conn, uint32_t uid,
-				    char *acct, acct_user_cond_t *user_q);
+				    List acct_list, acct_user_cond_t *user_q);
 
 
 /* 
@@ -331,82 +371,83 @@ extern int acct_storage_g_add_associations(void *db_conn, uint32_t uid,
  * modify existing users in the accounting system 
  * IN:  acct_user_cond_t *user_q
  * IN:  acct_user_rec_t *user
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_modify_users(void *db_conn, uint32_t uid, 
-				       acct_user_cond_t *user_q,
-				       acct_user_rec_t *user);
+					acct_user_cond_t *user_q,
+					acct_user_rec_t *user);
 
 /* 
  * modify existing accounts in the accounting system 
  * IN:  acct_acct_cond_t *acct_q
  * IN:  acct_account_rec_t *acct
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_modify_accounts(void *db_conn, uint32_t uid, 
-					  acct_account_cond_t *acct_q,
-					  acct_account_rec_t *acct);
+					   acct_account_cond_t *acct_q,
+					   acct_account_rec_t *acct);
 
 /* 
  * modify existing clusters in the accounting system 
  * IN:  acct_cluster_cond_t *cluster_q
  * IN:  acct_cluster_rec_t *cluster
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_modify_clusters(void *db_conn, uint32_t uid, 
-					  acct_cluster_cond_t *cluster_q,
-					  acct_cluster_rec_t *cluster);
+					   acct_cluster_cond_t *cluster_q,
+					   acct_cluster_rec_t *cluster);
 
 /* 
  * modify existing associations in the accounting system 
  * IN:  acct_association_cond_t *assoc_q
  * IN:  acct_association_rec_t *assoc
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_modify_associations(void *db_conn, uint32_t uid, 
-					      acct_association_cond_t *assoc_q,
-					      acct_association_rec_t *assoc);
+					       acct_association_cond_t *assoc_q,
+					       acct_association_rec_t *assoc);
 
 /* 
  * remove users from accounting system 
  * IN:  acct_user_cond_t *user_q
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_remove_users(void *db_conn, uint32_t uid, 
-				       acct_user_cond_t *user_q);
+					acct_user_cond_t *user_q);
 
 /* 
  * remove users from being a coordinator of an account
- * IN: acct name of acct
+ * IN: acct_list list of char *'s of account names
  * IN: acct_user_cond_t *user_q
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_remove_coord(void *db_conn, uint32_t uid, 
-				       char *acct, acct_user_cond_t *user_q);
+					List acct_list,
+					acct_user_cond_t *user_q);
 
 /* 
  * remove accounts from accounting system 
  * IN:  acct_account_cond_t *acct_q
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_remove_accounts(void *db_conn, uint32_t uid, 
-					  acct_account_cond_t *acct_q);
+					   acct_account_cond_t *acct_q);
 
 /* 
  * remove clusters from accounting system 
  * IN:  acct_cluster_cond_t *cluster_q
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_remove_clusters(void *db_conn, uint32_t uid, 
-					  acct_cluster_cond_t *cluster_q);
+					   acct_cluster_cond_t *cluster_q);
 
 /* 
  * remove associations from accounting system 
  * IN:  acct_association_cond_t *assoc_q
- * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * RET: List containing (char *'s) else NULL on error
  */
 extern List acct_storage_g_remove_associations(void *db_conn, uint32_t uid, 
-					      acct_association_cond_t *assoc_q);
+					       acct_association_cond_t *assoc_q);
 
 /* 
  * get info from the storage 
@@ -553,6 +594,14 @@ extern List jobacct_storage_g_get_jobs(void *db_conn,
 				       List selected_parts,
 				       void *params);
 
+/* 
+ * get info from the storage 
+ * returns List of jobacct_job_rec_t *
+ * note: the returned List must be freed by the caller
+ */
+extern List jobacct_storage_g_get_jobs_cond(void *db_conn, 
+					    acct_job_cond_t *job_cond);
+
 /* 
  * expire old info from the storage 
  */
diff --git a/src/common/slurm_auth.c b/src/common/slurm_auth.c
index 9e2305b5d725343a577946595e1a9265abcf5812..3df7e6d0e1495e035d88a401e4f1260713790c60 100644
--- a/src/common/slurm_auth.c
+++ b/src/common/slurm_auth.c
@@ -138,7 +138,17 @@ slurm_auth_get_ops( slurm_auth_context_t c )
         };
         int n_syms = sizeof( syms ) / sizeof( char * );
 
-        /* Get the plugin list, if needed. */
+ 	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->auth_type, n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s, "
+	      "looking at all files",
+	      c->auth_type);
+	
+       /* Get the plugin list, if needed. */
         if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
                 c->plugin_list = plugrack_create();
@@ -278,7 +288,9 @@ _slurm_auth_context_destroy( slurm_auth_context_t c )
                 if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
                         return SLURM_ERROR;
                 }
-        }  
+        } else {
+		plugin_unload(c->cur_plugin);
+	}
 
         xfree( c->auth_type );
         xfree( c );
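The same two-step lookup added here is repeated for the crypto, jobacct_gather and jobcomp contexts below. Stripped of the per-plugin types it reduces to roughly the shape sketched next; the wrapper name and the plugin_handle_t return type are assumptions for illustration, not code from this patch.

/* Assumed wrapper name and return type, shown only to summarize the pattern. */
static plugin_handle_t try_direct_plugin_load(char *type, const char *syms[],
					      int n_syms, void **ops)
{
	/* Resolve the plugin by its type name first, so the common case
	 * never stats every file in the plugin directory. */
	plugin_handle_t h = plugin_load_and_link(type, n_syms, syms, ops);

	if (h != PLUGIN_INVALID_HANDLE)
		return h;

	error("Couldn't find the specified plugin name for %s, "
	      "checking all plugin files", type);
	/* Caller falls back to the plugrack directory scan. */
	return PLUGIN_INVALID_HANDLE;
}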
diff --git a/src/common/slurm_cred.c b/src/common/slurm_cred.c
index ed7dd43198c1bc1cfdac2e5e8b56a9d613ad70c7..4373f507e42ddd7c35651c5783dbe49ef13d70a0 100644
--- a/src/common/slurm_cred.c
+++ b/src/common/slurm_cred.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  src/common/slurm_cred.c - SLURM job credential functions
- *  $Id: slurm_cred.c 14148 2008-05-28 23:35:40Z jette $
+ *  $Id: slurm_cred.c 14208 2008-06-06 19:15:24Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -280,6 +280,8 @@ _slurm_crypto_context_destroy( slurm_crypto_context_t *c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			 return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->crypto_type );
@@ -308,7 +310,18 @@ _slurm_crypto_get_ops( slurm_crypto_context_t *c )
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 	int rc = 0;
-        /* Get the plugin list, if needed. */
+ 
+	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->crypto_type, n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s, "
+	      "checking all plugin files",
+	      c->crypto_type);
+	
+       /* Get the plugin list, if needed. */
         if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
                 c->plugin_list = plugrack_create();
diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index 4720b21895c488ad3cc9fd90f1d5a96065a08eee..b8ee3417b48ead79ee4a102ea45a727446f64630 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -91,6 +91,8 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "Invalid MPI plugin name"                             },
 	{ SLURM_MPI_PLUGIN_PRELAUNCH_SETUP_FAILED,
 	  "MPI plugin's pre-launch setup failed"                },
+	{ SLURM_PLUGIN_NAME_INVALID,
+	  "Plugin initialization failed"			},
 
 	/* communication failures to/from slurmctld */
 	{ SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR,
@@ -217,6 +219,8 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "of the slurmctld daemon to take effect"},
 	{ ESLURM_ACCOUNTING_POLICY,
 	  "Job violates accounting policy (the user's size and/or time limits)"},
+	{ ESLURM_INVALID_TIME_LIMIT,
+	  "Requested time limit exceeds partition limit"	},
 
 	/* slurmd error codes */
 
diff --git a/src/common/slurm_jobacct_gather.c b/src/common/slurm_jobacct_gather.c
index 2902c4a27b5e8230be527237342bd2bd4448c2db..aff50ffcfca1d30242b8dee7b123e29dd9c7cc82 100644
--- a/src/common/slurm_jobacct_gather.c
+++ b/src/common/slurm_jobacct_gather.c
@@ -150,6 +150,8 @@ _slurm_jobacct_gather_context_destroy( slurm_jobacct_gather_context_t *c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			 return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->jobacct_gather_type );
@@ -189,7 +191,19 @@ _slurm_jobacct_gather_get_ops( slurm_jobacct_gather_context_t *c )
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 	int rc = 0;
-        /* Get the plugin list, if needed. */
+ 	
+	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->jobacct_gather_type,
+					     n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s, "
+	      "checking all plugin files",
+	      c->jobacct_gather_type);
+	
+       /* Get the plugin list, if needed. */
         if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
                 c->plugin_list = plugrack_create();
diff --git a/src/common/slurm_jobcomp.c b/src/common/slurm_jobcomp.c
index 3de84f64ccfb08d3c4f3c5a1c4a302d5cee1490d..3cdba9b903da87622324d9514e09853a635e7cd2 100644
--- a/src/common/slurm_jobcomp.c
+++ b/src/common/slurm_jobcomp.c
@@ -127,6 +127,8 @@ _slurm_jobcomp_context_destroy( slurm_jobcomp_context_t c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			 return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->jobcomp_type );
@@ -141,7 +143,7 @@ _slurm_jobcomp_context_destroy( slurm_jobcomp_context_t c )
 static slurm_jobcomp_ops_t *
 _slurm_jobcomp_get_ops( slurm_jobcomp_context_t c )
 {
-        /*
+	/*
          * These strings must be kept in the same order as the fields
          * declared for slurm_jobcomp_ops_t.
          */
@@ -154,8 +156,18 @@ _slurm_jobcomp_get_ops( slurm_jobcomp_context_t c )
 		"slurm_jobcomp_archive"
 	};
         int n_syms = sizeof( syms ) / sizeof( char * );
-
-        /* Get the plugin list, if needed. */
+	
+	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->jobcomp_type, n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s, "
+	      "checking all plugin files",
+	      c->jobcomp_type);
+	
+	/* Get the plugin list, if needed. */
         if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
                 c->plugin_list = plugrack_create();
diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c
index dbf3e2c3b30235710f7eaaf0ffeaedcc4315021a..42473f1664913d972e90b8f40ff181a4ad35cfee 100644
--- a/src/common/slurm_protocol_api.c
+++ b/src/common/slurm_protocol_api.c
@@ -228,8 +228,8 @@ uint32_t slurm_get_max_mem_per_task(void)
  */
 uint32_t slurm_get_epilog_msg_time(void)
 {
-        uint32_t epilog_msg_time = 0;
-        slurm_ctl_conf_t *conf;
+	uint32_t epilog_msg_time = 0;
+	slurm_ctl_conf_t *conf;
 
  	if(slurmdbd_conf) {
 	} else {
@@ -237,7 +237,7 @@ uint32_t slurm_get_epilog_msg_time(void)
 		epilog_msg_time = conf->epilog_msg_time;
 		slurm_conf_unlock();
 	}
-        return epilog_msg_time;
+	return epilog_msg_time;
 }
 
 /* slurm_get_env_timeout
@@ -245,8 +245,8 @@ uint32_t slurm_get_epilog_msg_time(void)
  */
 int inline slurm_get_env_timeout(void)
 {
-        int timeout = 0;
-        slurm_ctl_conf_t *conf;
+	int timeout = 0;
+	slurm_ctl_conf_t *conf;
 
 	if(slurmdbd_conf) {
 	} else {
@@ -254,7 +254,7 @@ int inline slurm_get_env_timeout(void)
 		timeout = conf->get_env_timeout;
 		slurm_conf_unlock();
 	}
-        return timeout;
+	return timeout;
 }
 
 /* slurm_get_mpi_default
@@ -280,8 +280,8 @@ char *slurm_get_mpi_default(void)
  */
 uint16_t slurm_get_msg_timeout(void)
 {
-        uint16_t msg_timeout = 0;
-        slurm_ctl_conf_t *conf;
+	uint16_t msg_timeout = 0;
+	slurm_ctl_conf_t *conf;
 
  	if(slurmdbd_conf) {
 		msg_timeout = slurmdbd_conf->msg_timeout;
@@ -293,7 +293,7 @@ uint16_t slurm_get_msg_timeout(void)
 		msg_timeout *= 4;
 #endif
 	}
-        return msg_timeout;
+	return msg_timeout;
 }
 
 /* slurm_get_plugin_dir
@@ -411,8 +411,8 @@ extern char *slurm_get_crypto_type(void)
  */
 extern uint16_t slurm_get_propagate_prio_process(void)
 {
-        uint16_t propagate_prio = 0;
-        slurm_ctl_conf_t *conf;
+	uint16_t propagate_prio = 0;
+	slurm_ctl_conf_t *conf;
 
  	if(slurmdbd_conf) {
 	} else {
@@ -420,7 +420,7 @@ extern uint16_t slurm_get_propagate_prio_process(void)
 		propagate_prio = conf->propagate_prio_process;
 		slurm_conf_unlock();
 	}
-        return propagate_prio;
+	return propagate_prio;
 }
 
 /* slurm_get_fast_schedule
@@ -893,8 +893,8 @@ uint32_t slurm_get_slurm_user_id(void)
  * RET uint16_t  - Value of SchedulerRootFilter */
 extern uint16_t slurm_get_root_filter(void)
 {
-        uint16_t root_filter = 0;
-        slurm_ctl_conf_t *conf;
+	uint16_t root_filter = 0;
+	slurm_ctl_conf_t *conf;
  
  	if(slurmdbd_conf) {
 	} else {
@@ -902,14 +902,14 @@ extern uint16_t slurm_get_root_filter(void)
 		root_filter = conf->schedrootfltr;
 		slurm_conf_unlock();
 	}
-        return root_filter;
+	return root_filter;
 }
 /* slurm_get_sched_port
  * RET uint16_t  - Value of SchedulerPort */
 extern uint16_t slurm_get_sched_port(void)
 {
-        uint16_t port = 0;
-        slurm_ctl_conf_t *conf;
+	uint16_t port = 0;
+	slurm_ctl_conf_t *conf;
 
  	if(slurmdbd_conf) {
 	} else {
@@ -917,7 +917,7 @@ extern uint16_t slurm_get_sched_port(void)
 		port = conf->schedport;
 		slurm_conf_unlock();
 	}
-        return port;
+	return port;
 }
 
 /* slurm_get_sched_type
@@ -1029,7 +1029,7 @@ char *slurm_get_srun_epilog(void)
  * RET task_epilog name, must be xfreed by caller */
 char *slurm_get_task_epilog(void)
 {
-        char *task_epilog = NULL;
+	char *task_epilog = NULL;
 	slurm_ctl_conf_t *conf;
 
 	if(slurmdbd_conf) {
@@ -1038,14 +1038,14 @@ char *slurm_get_task_epilog(void)
 		task_epilog = xstrdup(conf->task_epilog);
 		slurm_conf_unlock();
 	}
-        return task_epilog;
+	return task_epilog;
 }
 
 /* slurm_get_task_prolog
  * RET task_prolog name, must be xfreed by caller */
 char *slurm_get_task_prolog(void)
 {
-        char *task_prolog = NULL;
+	char *task_prolog = NULL;
 	slurm_ctl_conf_t *conf;
 
 	if(slurmdbd_conf) {
@@ -1054,27 +1054,27 @@ char *slurm_get_task_prolog(void)
 		task_prolog = xstrdup(conf->task_prolog);
 		slurm_conf_unlock();
 	}
-        return task_prolog;
+	return task_prolog;
 }
 
 /* slurm_get_task_plugin
  * RET task_plugin name, must be xfreed by caller */
 char *slurm_get_task_plugin(void)
 {
-        char *task_plugin = NULL;
+	char *task_plugin = NULL;
 	slurm_ctl_conf_t *conf;
 
 	conf = slurm_conf_lock();
-        task_plugin = xstrdup(conf->task_plugin);
+	task_plugin = xstrdup(conf->task_plugin);
 	slurm_conf_unlock();
-        return task_plugin;
+	return task_plugin;
 }
 
 /* slurm_get_task_plugin_param */
 uint16_t slurm_get_task_plugin_param(void)
 {
-        uint16_t task_plugin_param = 0;
-        slurm_ctl_conf_t *conf;
+	uint16_t task_plugin_param = 0;
+	slurm_ctl_conf_t *conf;
 
 	if(slurmdbd_conf) {
 	} else {
@@ -1082,7 +1082,7 @@ uint16_t slurm_get_task_plugin_param(void)
 		task_plugin_param = conf->task_plugin_param;
 		slurm_conf_unlock();
 	}
-        return task_plugin_param;
+	return task_plugin_param;
 }
 
 /* Change general slurm communication errors to slurmctld specific errors */
@@ -1165,16 +1165,17 @@ int slurm_shutdown_msg_conn(slurm_fd fd)
  *	is defined to only receive messages from the address/port pair  
  *	argument of the connect call slurm_address - for now it is  
  *	really just a sockaddr_in
- * IN slurm_address     - slurm_addr of the connection destination
- * RET slurm_fd         - file descriptor of the connection created
+ * IN slurm_address	- slurm_addr of the connection destination
+ * RET slurm_fd		- file descriptor of the connection created
  */
 slurm_fd slurm_open_msg_conn(slurm_addr * slurm_address)
 {
 	return _slurm_open_msg_conn(slurm_address);
 }
 
-/* calls connect to make a connection-less datagram connection to the 
- *	primary or secondary slurmctld message engine
+/* Calls connect to make a connection-less datagram connection to the 
+ *	primary or secondary slurmctld message engine. If the controller
+ *	is very busy, the connect may fail, so retry a couple of times.
  * OUT addr     - address of controller contacted
  * RET slurm_fd	- file descriptor of the connection created
  */
@@ -1182,29 +1183,39 @@ slurm_fd slurm_open_controller_conn(slurm_addr *addr)
 {
 	slurm_fd fd;
 	slurm_ctl_conf_t *conf;
+	int retry, have_backup = 0;
 
 	if (slurm_api_set_default_config() < 0)
 		return SLURM_FAILURE;
-	addr = &proto_conf->primary_controller;
-	if ((fd = slurm_open_msg_conn(&proto_conf->primary_controller)) >= 0)
-		return fd;
-	
-	debug("Failed to contact primary controller: %m");
 
-	conf = slurm_conf_lock();
-	if (!conf->backup_controller) {
-		slurm_conf_unlock();
-		goto fail;
+	for (retry=0; retry<4; retry++) {
+		if (retry)
+			sleep(1);
+
+		addr = &proto_conf->primary_controller;
+		fd = slurm_open_msg_conn(&proto_conf->primary_controller);
+		if (fd >= 0)
+			return fd;
+		debug("Failed to contact primary controller: %m");
+
+		if (retry == 0) {
+			conf = slurm_conf_lock();
+			if (conf->backup_controller)
+				have_backup = 1;
+			slurm_conf_unlock();
+		}
+
+		if (have_backup) {
+			addr = &proto_conf->secondary_controller;
+			fd = slurm_open_msg_conn(&proto_conf->
+						 secondary_controller);
+			if (fd >= 0)
+				return fd;
+			debug("Failed to contact secondary controller: %m");
+		}
 	}
-	slurm_conf_unlock();
 
-	addr = &proto_conf->secondary_controller;
-	if ((fd = slurm_open_msg_conn(&proto_conf->secondary_controller)) >= 0)
-		return fd;
 	addr = NULL;
-	debug("Failed to contact secondary controller: %m");
-
-    fail:
 	slurm_seterrno_ret(SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR);
 }
 
@@ -1303,7 +1314,7 @@ int slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout)
 	
 	if (timeout <= 0)
 		/* convert secs to msec */
-                timeout  = slurm_get_msg_timeout() * 1000; 
+		timeout  = slurm_get_msg_timeout() * 1000; 
 
 	else if(timeout > (slurm_get_msg_timeout() * 10000)) {
 		debug("You are receiving a message with very long "
@@ -1422,9 +1433,9 @@ total_return:
  * IN open_fd	- file descriptor to receive msg on
  * IN steps	- how many steps down the tree we have to wait for
  * IN timeout	- how long to wait in milliseconds
- * RET List	- List containing the responses of the childern (if any) we 
- *                forwarded the message to. List containing type
- *                (ret_data_info_t).
+ * RET List	- List containing the responses of the children (if any) we
+ *		  forwarded the message to. List containing type
+ *		  (ret_data_info_t).
  */
 List slurm_receive_msgs(slurm_fd fd, int steps, int timeout)
 {
@@ -1446,7 +1457,7 @@ List slurm_receive_msgs(slurm_fd fd, int steps, int timeout)
 	
 	if(timeout <= 0) {
 		/* convert secs to msec */
-                timeout  = slurm_get_msg_timeout() * 1000; 
+		timeout  = slurm_get_msg_timeout() * 1000; 
 		orig_timeout = timeout;
 	}
 	if(steps) {
@@ -1608,7 +1619,7 @@ static int _unpack_msg_uid(Buf buffer)
  *       and list_destroy function.
  * IN open_fd	- file descriptor to receive msg on
  * IN/OUT msg	- a slurm_msg struct to be filled in by the function
- *                we use the orig_addr from this var for forwarding. 
+ *		  we use the orig_addr from this var for forwarding. 
  * IN timeout	- how long to wait in milliseconds
  * RET int	- returns 0 on success, -1 on failure and sets errno
  */
@@ -1641,7 +1652,7 @@ int slurm_receive_msg_and_forward(slurm_fd fd, slurm_addr *orig_addr,
 
 	if (timeout <= 0)
 		/* convert secs to msec */
-                timeout  = slurm_get_msg_timeout() * 1000; 
+		timeout  = slurm_get_msg_timeout() * 1000; 
 		
 	if(timeout >= (slurm_get_msg_timeout() * 10000)) {
 		debug("slurm_receive_msg_and_forward: "
@@ -1925,7 +1936,7 @@ slurm_fd slurm_accept_stream(slurm_fd open_fd, slurm_addr * slurm_address)
 /* slurm_open_stream
  * opens a client connection to stream server
  * IN slurm_address     - slurm_addr of the connection destination
- * RET slurm_fd         - file descriptor of the connection created
+ * RET slurm_fd	 - file descriptor of the connection created
  * NOTE: Retry with various ports as needed if connection is refused
  */
 slurm_fd slurm_open_stream(slurm_addr * slurm_address)
@@ -2302,9 +2313,9 @@ _send_and_recv_msg(slurm_fd fd, slurm_msg_t *req,
  * IN fd	- file descriptor to receive msg on
  * IN req	- a slurm_msg struct to be sent by the function
  * IN timeout	- how long to wait in milliseconds
- * RET List	- List containing the responses of the childern (if any) we 
- *                forwarded the message to. List containing type
- *                (ret_data_info_t). 
+ * RET List	- List containing the responses of the children (if any) we
+ *		  forwarded the message to. List containing type
+ *		  (ret_data_info_t). 
  */
 static List
 _send_and_recv_msgs(slurm_fd fd, slurm_msg_t *req, int timeout)
@@ -2355,8 +2366,8 @@ _send_and_recv_msgs(slurm_fd fd, slurm_msg_t *req, int timeout)
  * opens a connection to the controller, sends the controller a message, 
  * listens for the response, then closes the connection
  * IN request_msg	- slurm_msg request
- * OUT response_msg     - slurm_msg response
- * RET int              - returns 0 on success, -1 on failure and sets errno
+ * OUT response_msg	- slurm_msg response
+ * RET int		- returns 0 on success, -1 on failure and sets errno
  */
 int slurm_send_recv_controller_msg(slurm_msg_t *req, slurm_msg_t *resp)
 {
@@ -2433,8 +2444,8 @@ int slurm_send_recv_controller_msg(slurm_msg_t *req, slurm_msg_t *resp)
  * for the response, then closes the connection
  * IN request_msg	- slurm_msg request
  * OUT response_msg	- slurm_msg response
- * IN timeout	        - how long to wait in milliseconds
- * RET int	        - returns 0 on success, -1 on failure and sets errno
+ * IN timeout		- how long to wait in milliseconds
+ * RET int		- returns 0 on success, -1 on failure and sets errno
  */
 int slurm_send_recv_node_msg(slurm_msg_t *req, slurm_msg_t *resp, int timeout)
 {
@@ -2528,12 +2539,12 @@ int slurm_send_only_node_msg(slurm_msg_t *req)
 /*
  *  Send a message to the nodelist specificed using fanout
  *    Then return List containing type (ret_data_info_t).
- * IN nodelist	    - list of nodes to send to.
- * IN msg           - a slurm_msg struct to be sent by the function
- * IN timeout	    - how long to wait in milliseconds
- * RET List	    - List containing the responses of the childern
- *                    (if any) we forwarded the message to. List
- *                    containing type (ret_data_info_t).
+ * IN nodelist	  - list of nodes to send to.
+ * IN msg	  - a slurm_msg struct to be sent by the function
+ * IN timeout	  - how long to wait in milliseconds
+ * RET List	  - List containing the responses of the children
+ *		    (if any) we forwarded the message to. List
+ *		    containing type (ret_data_info_t).
  */
 List slurm_send_recv_msgs(const char *nodelist, slurm_msg_t *msg, 
 			  int timeout)
@@ -2591,11 +2602,11 @@ List slurm_send_recv_msgs(const char *nodelist, slurm_msg_t *msg,
 		msg->forward.nodelist = xstrdup(buf);
 		msg->forward.timeout = timeout;
 		msg->forward.cnt = hostlist_count(hl);
-                if (msg->forward.nodelist[0]) {
-	        	debug3("sending to %s along with to %s", 
-                               name, msg->forward.nodelist);
-                } else
-                        debug3("sending to %s", name);
+		if (msg->forward.nodelist[0]) {
+			debug3("sending to %s along with to %s", 
+			       name, msg->forward.nodelist);
+		} else
+			debug3("sending to %s", name);
 		
 		if(!(ret_list = _send_and_recv_msgs(fd, msg, timeout))) {
 			xfree(msg->forward.nodelist);
@@ -2634,11 +2645,11 @@ List slurm_send_recv_msgs(const char *nodelist, slurm_msg_t *msg,
 /*
  *  Send a message to msg->address
  *    Then return List containing type (ret_data_info_t). 
- * IN msg           - a slurm_msg struct to be sent by the function
- * IN timeout	    - how long to wait in milliseconds
- * RET List	    - List containing the responses of the childern
- *                    (if any) we forwarded the message to. List
- *                    containing type (ret_types_t).
+ * IN msg	  - a slurm_msg struct to be sent by the function
+ * IN timeout	  - how long to wait in milliseconds
+ * RET List	  - List containing the responses of the children
+ *		    (if any) we forwarded the message to. List
+ *		    containing type (ret_types_t).
  */
 List slurm_send_addr_recv_msgs(slurm_msg_t *msg, char *name, int timeout)
 {
@@ -2739,9 +2750,9 @@ int slurm_send_recv_controller_rc_msg(slurm_msg_t *req, int *rc)
  * of the tree.  
  * IN total       - total number of nodes to send to
  * IN tree_width  - how wide the tree should be on each hop
- * RET int *      - int array tree_width in length each space
- *                  containing the number of nodes to send to each hop
- *                  on the span. 
+ * RET int *	  - int array tree_width in length each space
+ *		    containing the number of nodes to send to each hop
+ *		    on the span. 
  */
 
 extern int *set_span(int total,  uint16_t tree_width)
@@ -2750,8 +2761,8 @@ extern int *set_span(int total,  uint16_t tree_width)
 	int left = total;
 	int i = 0;
 
-        if (tree_width == 0)
-	        tree_width = slurm_get_tree_width();
+	if (tree_width == 0)
+		tree_width = slurm_get_tree_width();
 
 	span = xmalloc(sizeof(int) * tree_width);
 	//info("span count = %d", tree_width);
@@ -2818,8 +2829,8 @@ extern void convert_num_unit(float num, char *buf, int buf_size, int orig_type)
 {
 	char *unit = "\0KMGP?";
 	int i = (int)num % 512;
-	
-	if(i > 0 || (int)num == 0) {
+
+	if((i > 0 && num < 1024) || (int)num == 0) {
 		snprintf(buf, buf_size, "%d%c", (int)num, unit[orig_type]);
 		return;
 	}
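The reworked slurm_open_controller_conn() above reduces to the retry shape sketched below; the helper name is an assumption, and the real function additionally maintains the addr out-parameter and reads the backup controller address from the locked configuration.

/* Distilled retry loop; helper name is an assumption, not patch code. */
static slurm_fd open_controller_retry(slurm_addr *primary, slurm_addr *backup)
{
	slurm_fd fd;
	int retry;

	for (retry = 0; retry < 4; retry++) {
		if (retry)
			sleep(1);	/* give a busy slurmctld a breather */

		if ((fd = slurm_open_msg_conn(primary)) >= 0)
			return fd;
		debug("Failed to contact primary controller: %m");

		if (backup && ((fd = slurm_open_msg_conn(backup)) >= 0))
			return fd;
	}
	slurm_seterrno_ret(SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR);
}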
diff --git a/src/common/slurm_protocol_socket_implementation.c b/src/common/slurm_protocol_socket_implementation.c
index ea2f01cd40d3159ba56cddd43dff3c34b7417ef9..adb19f2ceeff12c4e16124a4e121d9772c649fb6 100644
--- a/src/common/slurm_protocol_socket_implementation.c
+++ b/src/common/slurm_protocol_socket_implementation.c
@@ -1,9 +1,9 @@
 /*****************************************************************************\
  *  slurm_protocol_socket_implementation.c - slurm communications interfaces 
- *                                           based upon sockets.
- *  $Id: slurm_protocol_socket_implementation.c 13672 2008-03-19 23:10:58Z jette $
+ *					     based upon sockets.
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
  *  LLNL-CODE-402394.
@@ -81,7 +81,7 @@
 #define MIN_USER_PORT   (IPPORT_RESERVED + 1)
 #define MAX_USER_PORT   0xffff
 #define RANDOM_USER_PORT ((uint16_t) ((lrand48() % \
-                (MAX_USER_PORT - MIN_USER_PORT + 1)) + MIN_USER_PORT))
+		(MAX_USER_PORT - MIN_USER_PORT + 1)) + MIN_USER_PORT))
 
 /*
  *  Maximum message size. Messages larger than this value (in bytes)
@@ -98,28 +98,28 @@
  */
 static int _tot_wait (struct timeval *start_time)
 {
-        struct timeval end_time;
-        int msec_delay;
+	struct timeval end_time;
+	int msec_delay;
 
-        gettimeofday(&end_time, NULL);
-        msec_delay =   (end_time.tv_sec  - start_time->tv_sec ) * 1000;
-        msec_delay += ((end_time.tv_usec - start_time->tv_usec + 500) / 1000);
-        return msec_delay;
+	gettimeofday(&end_time, NULL);
+	msec_delay =   (end_time.tv_sec  - start_time->tv_sec ) * 1000;
+	msec_delay += ((end_time.tv_usec - start_time->tv_usec + 500) / 1000);
+	return msec_delay;
 }
 
 slurm_fd _slurm_init_msg_engine ( slurm_addr * slurm_address )
 {
-        return _slurm_listen_stream ( slurm_address ) ;
+	return _slurm_listen_stream ( slurm_address ) ;
 }
 
 slurm_fd _slurm_open_msg_conn ( slurm_addr * slurm_address )
 {
-        return _slurm_open_stream ( slurm_address, false ) ;
+	return _slurm_open_stream ( slurm_address, false ) ;
 }
 
 slurm_fd _slurm_accept_msg_conn (slurm_fd fd, slurm_addr *addr)
 {
-        return _slurm_accept_stream(fd, addr);
+	return _slurm_accept_stream(fd, addr);
 } 
 
 /*
@@ -130,27 +130,27 @@ slurm_fd _slurm_accept_msg_conn (slurm_fd fd, slurm_addr *addr)
  */
 static void _sock_bind_wild(int sockfd)
 {
-        int rc, retry;
-        slurm_addr sin;
-        static bool seeded = false;
+	int rc, retry;
+	slurm_addr sin;
+	static bool seeded = false;
 
-        if (!seeded) {
-                seeded = true;
-                srand48((long int) (time(NULL) + getpid()));
-        }
+	if (!seeded) {
+		seeded = true;
+		srand48((long int) (time(NULL) + getpid()));
+	}
 
-        memset(&sin, 0, sizeof(sin));
-        sin.sin_family = AF_INET;
-        sin.sin_addr.s_addr = htonl(INADDR_ANY);
-        sin.sin_port = htons(RANDOM_USER_PORT);
+	memset(&sin, 0, sizeof(sin));
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = htonl(INADDR_ANY);
+	sin.sin_port = htons(RANDOM_USER_PORT);
 
-        for (retry=0; retry < PORT_RETRIES ; retry++) {
-                rc = bind(sockfd, (struct sockaddr *) &sin, sizeof(sin));
-                if (rc >= 0)
-                        break;
-                sin.sin_port  = htons(RANDOM_USER_PORT);
-        }
-        return;
+	for (retry=0; retry < PORT_RETRIES ; retry++) {
+		rc = bind(sockfd, (struct sockaddr *) &sin, sizeof(sin));
+		if (rc >= 0)
+			break;
+		sin.sin_port  = htons(RANDOM_USER_PORT);
+	}
+	return;
 }
        
 /* 
@@ -158,70 +158,70 @@ static void _sock_bind_wild(int sockfd)
  */
 int _slurm_close_accepted_conn (slurm_fd fd) 
 {
-        return _slurm_close (fd);
+	return _slurm_close (fd);
 }
 
 ssize_t _slurm_msg_recvfrom(slurm_fd fd, char **pbuf, size_t *lenp, 
-                            uint32_t flags)
+			    uint32_t flags)
 {
-        return _slurm_msg_recvfrom_timeout(fd, pbuf, lenp, flags, 
-                                (slurm_get_msg_timeout() * 1000));
+	return _slurm_msg_recvfrom_timeout(fd, pbuf, lenp, flags, 
+				(slurm_get_msg_timeout() * 1000));
 }
 
 ssize_t _slurm_msg_recvfrom_timeout(slurm_fd fd, char **pbuf, size_t *lenp, 
-                                    uint32_t flags, int tmout)
+				    uint32_t flags, int tmout)
 {
-        ssize_t  len;
-        uint32_t msglen;
+	ssize_t  len;
+	uint32_t msglen;
 
-        len = _slurm_recv_timeout( fd, (char *)&msglen, 
-                                   sizeof(msglen), 0, tmout );
+	len = _slurm_recv_timeout( fd, (char *)&msglen, 
+				   sizeof(msglen), 0, tmout );
 
-        if (len < ((ssize_t) sizeof(msglen))) 
-                return SLURM_ERROR;
+	if (len < ((ssize_t) sizeof(msglen))) 
+		return SLURM_ERROR;
 
-        msglen = ntohl(msglen);
+	msglen = ntohl(msglen);
 	
-        if (msglen > MAX_MSG_SIZE) 
-                slurm_seterrno_ret(SLURM_PROTOCOL_INSANE_MSG_LENGTH);
-
-        /*
-         *  Allocate memory on heap for message
-         */
-        *pbuf = xmalloc(msglen);
-
-        if (_slurm_recv_timeout(fd, *pbuf, msglen, 0, tmout) != msglen) {
-                xfree(*pbuf);
-                *pbuf = NULL;
-                return SLURM_ERROR;
-        }
+	if (msglen > MAX_MSG_SIZE) 
+		slurm_seterrno_ret(SLURM_PROTOCOL_INSANE_MSG_LENGTH);
+
+	/*
+	 *  Allocate memory on heap for message
+	 */
+	*pbuf = xmalloc(msglen);
+
+	if (_slurm_recv_timeout(fd, *pbuf, msglen, 0, tmout) != msglen) {
+		xfree(*pbuf);
+		*pbuf = NULL;
+		return SLURM_ERROR;
+	}
 
-        *lenp = msglen;
-        
-        return (ssize_t) msglen;
+	*lenp = msglen;
+	
+	return (ssize_t) msglen;
 }
 
 ssize_t _slurm_msg_sendto(slurm_fd fd, char *buffer, size_t size, 
-                          uint32_t flags)
+			  uint32_t flags)
 {
-        return _slurm_msg_sendto_timeout( fd, buffer, size, flags, 
-                                (slurm_get_msg_timeout() * 1000));
+	return _slurm_msg_sendto_timeout( fd, buffer, size, flags, 
+				(slurm_get_msg_timeout() * 1000));
 }
 
 ssize_t _slurm_msg_sendto_timeout(slurm_fd fd, char *buffer, size_t size, 
-                                  uint32_t flags, int timeout)
+				  uint32_t flags, int timeout)
 {
-        int   len;
-        uint32_t usize;
-        SigFunc *ohandler;
+	int   len;
+	uint32_t usize;
+	SigFunc *ohandler;
 
-        /* 
-         *  Ignore SIGPIPE so that send can return a error code if the 
-         *    other side closes the socket 
-         */
-        ohandler = xsignal(SIGPIPE, SIG_IGN);
+	/* 
+	 *  Ignore SIGPIPE so that send can return an error code if the 
+	 *    other side closes the socket 
+	 */
+	ohandler = xsignal(SIGPIPE, SIG_IGN);
 
-        usize = htonl(size);
+	usize = htonl(size);
 
 	if ((len = _slurm_send_timeout( 
 				fd, (char *)&usize, sizeof(usize), 0, 
@@ -233,41 +233,42 @@ ssize_t _slurm_msg_sendto_timeout(slurm_fd fd, char *buffer, size_t size,
 
 
      done:
-        xsignal(SIGPIPE, ohandler);
-        return len;
+	xsignal(SIGPIPE, ohandler);
+	return len;
 }
 
 /* Send slurm message with timeout
  * RET message size (as specified in argument) or SLURM_ERROR on error */
 int _slurm_send_timeout(slurm_fd fd, char *buf, size_t size, 
-                        uint32_t flags, int timeout)
-{
-        int rc;
-        int sent = 0;
-        int fd_flags;
-        struct pollfd ufds;
-        struct timeval tstart;
-        int timeleft = timeout;
+			uint32_t flags, int timeout)
+{
+	int rc;
+	int sent = 0;
+	int fd_flags;
+	struct pollfd ufds;
+	struct timeval tstart;
+	int timeleft = timeout;
 	char temp[2];
-        ufds.fd     = fd;
-        ufds.events = POLLOUT;
 
-        fd_flags = _slurm_fcntl(fd, F_GETFL);
-        fd_set_nonblocking(fd);
+	ufds.fd     = fd;
+	ufds.events = POLLOUT;
 
-        gettimeofday(&tstart, NULL);
+	fd_flags = _slurm_fcntl(fd, F_GETFL);
+	fd_set_nonblocking(fd);
 
-        while (sent < size) {
+	gettimeofday(&tstart, NULL);
 
-                timeleft = timeout - _tot_wait(&tstart);
-                if (timeleft <= 0) {
+	while (sent < size) {
+		timeleft = timeout - _tot_wait(&tstart);
+		if (timeleft <= 0) {
 			debug("_slurm_send_timeout at %d of %d, timeout",
 				sent, size);
 			slurm_seterrno(SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT);
 			sent = SLURM_ERROR;
 			goto done;
-                }
-                if ((rc = poll(&ufds, 1, timeleft)) <= 0) {
+		}
+
+		if ((rc = poll(&ufds, 1, timeleft)) <= 0) {
 			if ((rc == 0) || (errno == EINTR) || (errno == EAGAIN)) 
  				continue;
 			else {
@@ -278,7 +279,7 @@ int _slurm_send_timeout(slurm_fd fd, char *buf, size_t size,
 				sent = SLURM_ERROR;
 				goto done;
 			}
-                }
+		}
 
 		/*
 		 * Check here to make sure the socket really is there.
@@ -287,16 +288,26 @@ int _slurm_send_timeout(slurm_fd fd, char *buf, size_t size,
 		 * socket is gone, but getting 0 back from a
 		 * nonblocking read means just that. 
 		 */
-		rc = _slurm_recv(fd, &temp, 1, flags);
-		if (rc == 0) {
-			debug2("_slurm_send_timeout: Socket no longer there.");
+		if (ufds.revents & POLLERR) {
+			debug("_slurm_send_timeout: Socket POLLERR");
 			slurm_seterrno(ENOTCONN);
 			sent = SLURM_ERROR;
 			goto done;			
 		}
+		if ((ufds.revents & POLLHUP) || (ufds.revents & POLLNVAL) ||
+		    (_slurm_recv(fd, &temp, 1, flags) == 0)) {
+			debug2("_slurm_send_timeout: Socket no longer there");
+			slurm_seterrno(ENOTCONN);
+			sent = SLURM_ERROR;
+			goto done;			
+		}
+		if ((ufds.revents & POLLOUT) != POLLOUT) {
+			error("_slurm_send_timeout: Poll failure, revents:%d",
+			      ufds.revents);
+		}
 		
-                rc = _slurm_send(fd, &buf[sent], (size - sent), flags);
-                if (rc < 0) {
+		rc = _slurm_send(fd, &buf[sent], (size - sent), flags);
+		if (rc < 0) {
  			if (errno == EINTR)
 				continue;
 			else {
@@ -307,17 +318,17 @@ int _slurm_send_timeout(slurm_fd fd, char *buf, size_t size,
 				sent = SLURM_ERROR;
 				goto done;
 			}
-                }
-                if (rc == 0) {
+		}
+		if (rc == 0) {
 			debug("_slurm_send_timeout at %d of %d, "
 				"sent zero bytes", sent, size);
-                        slurm_seterrno(SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT);
-                        sent = SLURM_ERROR;
-                        goto done;
-                }
+			slurm_seterrno(SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT);
+			sent = SLURM_ERROR;
+			goto done;
+		}
 
-                sent += rc;
-        }
+		sent += rc;
+	}
 
     done:
 	/* Reset fd flags to prior state, preserve errno */
@@ -327,45 +338,44 @@ int _slurm_send_timeout(slurm_fd fd, char *buf, size_t size,
 		slurm_seterrno(slurm_err);
 	}
 
-        return sent;
-        
+	return sent;
+	
 }
 
 /* Get slurm message with timeout
  * RET message size (as specified in argument) or SLURM_ERROR on error */
 int _slurm_recv_timeout(slurm_fd fd, char *buffer, size_t size, 
-                        uint32_t flags, int timeout )
+			uint32_t flags, int timeout )
 {
-        int rc;
-        int recvlen = 0;
-        int fd_flags;
-        struct pollfd  ufds;
-        struct timeval tstart;
+	int rc;
+	int recvlen = 0;
+	int fd_flags;
+	struct pollfd  ufds;
+	struct timeval tstart;
 	int timeleft = timeout;
 
-        ufds.fd     = fd;
-        ufds.events = POLLIN;
-
-        fd_flags = _slurm_fcntl(fd, F_GETFL);
-        fd_set_nonblocking(fd);
+	ufds.fd     = fd;
+	ufds.events = POLLIN;
 
-        gettimeofday(&tstart, NULL);
+	fd_flags = _slurm_fcntl(fd, F_GETFL);
+	fd_set_nonblocking(fd);
 
-        while (recvlen < size) {
+	gettimeofday(&tstart, NULL);
 
-                timeleft = timeout - _tot_wait(&tstart);
-                if (timeleft <= 0) {
+	while (recvlen < size) {
+		timeleft = timeout - _tot_wait(&tstart);
+		if (timeleft <= 0) {
 			debug("_slurm_recv_timeout at %d of %d, timeout",
 				recvlen, size);
-                        slurm_seterrno(SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT);
-                        recvlen = SLURM_ERROR;
-                        goto done;
-                }
-
-                if ((rc = poll(&ufds, 1, timeleft)) <= 0) {
-                        if ((errno == EINTR) || (errno == EAGAIN) || (rc == 0))
-                                continue;
-                        else {
+			slurm_seterrno(SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT);
+			recvlen = SLURM_ERROR;
+			goto done;
+		}
+
+		if ((rc = poll(&ufds, 1, timeleft)) <= 0) {
+			if ((errno == EINTR) || (errno == EAGAIN) || (rc == 0))
+				continue;
+			else {
 				debug("_slurm_recv_timeout at %d of %d, "
 					"poll error: %s",
 					recvlen, size, strerror(errno));
@@ -373,13 +383,32 @@ int _slurm_recv_timeout(slurm_fd fd, char *buffer, size_t size,
 					SLURM_COMMUNICATIONS_RECEIVE_ERROR);
  				recvlen = SLURM_ERROR; 
   				goto done;
-                        }
-                } 
-                rc = _slurm_recv(fd, &buffer[recvlen], (size - recvlen), flags);
-                if (rc < 0)  {
-                        if (errno == EINTR)
-                                continue;
-                        else {
+			}
+		}
+
+		if (ufds.revents & POLLERR) {
+			debug("_slurm_recv_timeout: Socket POLLERR");
+			slurm_seterrno(ENOTCONN);
+			recvlen = SLURM_ERROR;
+			goto done;			
+		}
+		if ((ufds.revents & POLLHUP) || (ufds.revents & POLLNVAL)) {
+			debug2("_slurm_recv_timeout: Socket no longer there");
+			slurm_seterrno(ENOTCONN);
+			recvlen = SLURM_ERROR;
+			goto done;			
+		}
+		if ((ufds.revents & POLLIN) != POLLIN) {
+			error("_slurm_recv_timeout: Poll failure, revents:%d",
+			      ufds.revents);
+			continue;
+		}
+
+		rc = _slurm_recv(fd, &buffer[recvlen], (size - recvlen), flags);
+		if (rc < 0)  {
+			if (errno == EINTR)
+				continue;
+			else {
 				debug("_slurm_recv_timeout at %d of %d, "
 					"recv error: %s",
 					recvlen, size, strerror(errno));
@@ -387,17 +416,17 @@ int _slurm_recv_timeout(slurm_fd fd, char *buffer, size_t size,
 					SLURM_COMMUNICATIONS_RECEIVE_ERROR);
 				recvlen = SLURM_ERROR; 
 				goto done;
-                        }
-                }
-                if (rc == 0) {
+			}
+		}
+		if (rc == 0) {
 			debug("_slurm_recv_timeout at %d of %d, "
 				"recv zero bytes", recvlen, size);
 			slurm_seterrno(SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT);
 			recvlen = SLURM_ERROR;
 			goto done;
-                }
-                recvlen += rc;
-        }
+		}
+		recvlen += rc;
+	}
 
 
     done:
@@ -408,63 +437,63 @@ int _slurm_recv_timeout(slurm_fd fd, char *buffer, size_t size,
 		slurm_seterrno(slurm_err);
 	}
 
-        return recvlen;
+	return recvlen;
 }
 
 int _slurm_shutdown_msg_engine ( slurm_fd open_fd )
 {
-        return _slurm_close ( open_fd ) ;
+	return _slurm_close ( open_fd ) ;
 }
 
 slurm_fd _slurm_listen_stream(slurm_addr *addr)
 {
-        int rc;
-        slurm_fd fd;
-        const int one = 1;
-        const size_t sz1 = sizeof(one);
+	int rc;
+	slurm_fd fd;
+	const int one = 1;
+	const size_t sz1 = sizeof(one);
 
-        if ((fd = _slurm_create_socket(SLURM_STREAM)) < 0) {
+	if ((fd = _slurm_create_socket(SLURM_STREAM)) < 0) {
 		error("Error creating slurm stream socket: %m");
-                return fd;
-        }
+		return fd;
+	}
 
-        rc = _slurm_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sz1);
-        if (rc < 0) {
+	rc = _slurm_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sz1);
+	if (rc < 0) {
 		error("setsockopt SO_REUSEADDR failed: %m");
-                goto error; 
-        }
+		goto error; 
+	}
 
-        rc = _slurm_bind(fd, (struct sockaddr const *) addr, sizeof(*addr));
-        if (rc < 0) {
+	rc = _slurm_bind(fd, (struct sockaddr const *) addr, sizeof(*addr));
+	if (rc < 0) {
 		error("Error binding slurm stream socket: %m");
-                goto error; 
-        }
+		goto error; 
+	}
 
-        if (_slurm_listen(fd, SLURM_PROTOCOL_DEFAULT_LISTEN_BACKLOG) < 0) {
+	if (_slurm_listen(fd, SLURM_PROTOCOL_DEFAULT_LISTEN_BACKLOG) < 0) {
 		error( "Error listening on slurm stream socket: %m" ) ;
-                rc = SLURM_ERROR;
-                goto error; 
-        }
-        
-        return fd;
+		rc = SLURM_ERROR;
+		goto error; 
+	}
+	
+	return fd;
 
     error:
 	if ((_slurm_close_stream(fd) < 0) && (errno == EINTR))
 		_slurm_close_stream(fd);	/* try again */
-        return rc;
-        
+	return rc;
+	
 }
 
 slurm_fd _slurm_accept_stream(slurm_fd fd, slurm_addr *addr)
 {
-        socklen_t len = sizeof(slurm_addr);
-        return _slurm_accept(fd, (struct sockaddr *)addr, &len);
+	socklen_t len = sizeof(slurm_addr);
+	return _slurm_accept(fd, (struct sockaddr *)addr, &len);
 }
 
 slurm_fd _slurm_open_stream(slurm_addr *addr, bool retry)
 {
-        int retry_cnt;
-        slurm_fd fd;
+	int retry_cnt;
+	slurm_fd fd;
 
 	if ( (addr->sin_family == 0) || (addr->sin_port  == 0) ) {
 		error("Error connecting, bad data: family = %u, port = %u",
@@ -472,86 +501,86 @@ slurm_fd _slurm_open_stream(slurm_addr *addr, bool retry)
 		return SLURM_SOCKET_ERROR;
 	}
 
-        for (retry_cnt=0; ; retry_cnt++) {
-                int rc;
-                if ((fd =_slurm_create_socket(SLURM_STREAM)) < 0) {
-        		error("Error creating slurm stream socket: %m");
-                        slurm_seterrno(errno);
-                        return SLURM_SOCKET_ERROR;
-                }
-
-                if (retry_cnt) {
-                        if (retry_cnt == 1)
-                                debug3("Error connecting, picking new stream port");
-                        _sock_bind_wild(fd);
-                }
-
-                rc = _slurm_connect(fd, (struct sockaddr const *)addr, sizeof(*addr));
-                if (rc >= 0)                    /* success */
-                        break;
-                if ((errno != ECONNREFUSED) || 
-                    (!retry) || (retry_cnt >= PORT_RETRIES)) {
-                        slurm_seterrno(errno);
-                        goto error;
-                }
-
-                if ((_slurm_close_stream(fd) < 0) && (errno == EINTR))
-                        _slurm_close_stream(fd);        /* try again */
+	for (retry_cnt=0; ; retry_cnt++) {
+		int rc;
+		if ((fd =_slurm_create_socket(SLURM_STREAM)) < 0) {
+			error("Error creating slurm stream socket: %m");
+			slurm_seterrno(errno);
+			return SLURM_SOCKET_ERROR;
+		}
+
+		if (retry_cnt) {
+			if (retry_cnt == 1)
+				debug3("Error connecting, picking new stream port");
+			_sock_bind_wild(fd);
+		}
+
+		rc = _slurm_connect(fd, (struct sockaddr const *)addr, sizeof(*addr));
+		if (rc >= 0)		    /* success */
+			break;
+		if ((errno != ECONNREFUSED) || 
+		    (!retry) || (retry_cnt >= PORT_RETRIES)) {
+			slurm_seterrno(errno);
+			goto error;
+		}
+
+		if ((_slurm_close_stream(fd) < 0) && (errno == EINTR))
+			_slurm_close_stream(fd);	/* try again */
 	}
 
-        return fd;
+	return fd;
 
     error:
-        debug2("Error connecting slurm stream socket: %m");
+	debug2("Error connecting slurm stream socket: %m");
 	if ((_slurm_close_stream(fd) < 0) && (errno == EINTR))
 		_slurm_close_stream(fd);	/* try again */
-        return SLURM_SOCKET_ERROR;
+	return SLURM_SOCKET_ERROR;
 }
 
 int _slurm_get_stream_addr(slurm_fd fd, slurm_addr *addr )
 {
-        socklen_t size = sizeof(addr);
-        return _slurm_getsockname(fd, (struct sockaddr *)addr, &size);
+	socklen_t size = sizeof(addr);
+	return _slurm_getsockname(fd, (struct sockaddr *)addr, &size);
 }
 
 int _slurm_close_stream ( slurm_fd open_fd )
 {
-        return _slurm_close ( open_fd ) ;
+	return _slurm_close ( open_fd ) ;
 }
 
 
 int _slurm_set_stream_non_blocking(slurm_fd fd)
 {
-        fd_set_nonblocking(fd);
-        return SLURM_SUCCESS;
+	fd_set_nonblocking(fd);
+	return SLURM_SUCCESS;
 }
 
 int _slurm_set_stream_blocking(slurm_fd fd) 
 {
-        fd_set_blocking(fd);
-        return SLURM_SUCCESS;
+	fd_set_blocking(fd);
+	return SLURM_SUCCESS;
 }
 
 extern int _slurm_socket (int __domain, int __type, int __protocol)
 {
-        return socket ( __domain, __type, __protocol ) ;
-}        
+	return socket ( __domain, __type, __protocol ) ;
+}	
 
 extern slurm_fd _slurm_create_socket ( slurm_socket_type_t type )
 {
-        switch ( type )
-        {
-                case SLURM_STREAM :
-                        return _slurm_socket ( AF_INET, SOCK_STREAM, 
-                                                IPPROTO_TCP) ;
-                        break;
-                case SLURM_MESSAGE :
-                        return _slurm_socket ( AF_INET, SOCK_DGRAM, 
-                                                IPPROTO_UDP ) ;
-                        break;
-                default :
-                        return SLURM_SOCKET_ERROR;
-        }
+	switch ( type )
+	{
+		case SLURM_STREAM :
+			return _slurm_socket ( AF_INET, SOCK_STREAM, 
+						IPPROTO_TCP) ;
+			break;
+		case SLURM_MESSAGE :
+			return _slurm_socket ( AF_INET, SOCK_DGRAM, 
+						IPPROTO_UDP ) ;
+			break;
+		default :
+			return SLURM_SOCKET_ERROR;
+	}
 }
 
 /* Create two new sockets, of type TYPE in domain DOMAIN and using
@@ -559,23 +588,23 @@ extern slurm_fd _slurm_create_socket ( slurm_socket_type_t type )
  * descriptors for them in FDS[0] and FDS[1].  If PROTOCOL is zero,
  * one will be chosen automatically.  Returns 0 on success, -1 for errors.  */
 extern int _slurm_socketpair (int __domain, int __type, 
-                                int __protocol, int __fds[2])
+			      int __protocol, int __fds[2])
 {
-        return SLURM_PROTOCOL_FUNCTION_NOT_IMPLEMENTED ;
+	return SLURM_PROTOCOL_FUNCTION_NOT_IMPLEMENTED ;
 }
 
 /* Give the socket FD the local address ADDR (which is LEN bytes long).  */
 extern int _slurm_bind (int __fd, struct sockaddr const * __addr, 
-                                socklen_t __len)
+				socklen_t __len)
 {
-        return bind ( __fd , __addr , __len ) ;
+	return bind ( __fd , __addr , __len ) ;
 }
 
 /* Put the local address of FD into *ADDR and its length in *LEN.  */
 extern int _slurm_getsockname (int __fd, struct sockaddr * __addr, 
-                                socklen_t *__restrict __len)
+			       socklen_t *__restrict __len)
 {
-        return getsockname ( __fd , __addr , __len ) ;        
+	return getsockname ( __fd , __addr , __len ) ;	
 }
 
 /* Open a connection on socket FD to peer at ADDR (which LEN bytes long).
@@ -583,7 +612,7 @@ extern int _slurm_getsockname (int __fd, struct sockaddr * __addr,
  * and the only address from which to accept transmissions.
  * Return 0 on success, -1 for errors.  */
 extern int _slurm_connect (int __fd, struct sockaddr const * __addr, 
-                                socklen_t __len)
+			   socklen_t __len)
 {
 #if 0
 	return connect ( __fd , __addr , __len ) ;
@@ -658,76 +687,77 @@ done:
 /* Put the address of the peer connected to socket FD into *ADDR
  * (which is *LEN bytes long), and its actual length into *LEN.  */
 extern int _slurm_getpeername (int __fd, struct sockaddr * __addr, 
-                                socklen_t *__restrict __len)
+			       socklen_t *__restrict __len)
 {
-        return getpeername ( __fd , __addr , __len ) ;
+	return getpeername ( __fd , __addr , __len ) ;
 }
 
 /* Send N bytes of BUF to socket FD.  Returns the number sent or -1.  */
 extern ssize_t _slurm_send (int __fd, __const void *__buf, size_t __n, 
-                                int __flags)
+			    int __flags)
 {
-        return send ( __fd , __buf , __n , __flags ) ;
+	return send ( __fd , __buf , __n , __flags ) ;
 }
 
 /* Read N bytes into BUF from socket FD.
  * Returns the number read or -1 for errors.  */
 extern ssize_t _slurm_recv (int __fd, void *__buf, size_t __n, int __flags)
 {
-        return recv ( __fd , __buf , __n , __flags ) ;
+	return recv ( __fd , __buf , __n , __flags ) ;
 }
 
 /* Send N bytes of BUF on socket FD to peer at address ADDR (which is
  * ADDR_LEN bytes long).  Returns the number sent, or -1 for errors.  */
-extern ssize_t _slurm_sendto (int __fd, __const void *__buf, size_t __n, int __flags, struct sockaddr const * __addr, 
-                                socklen_t __addr_len)
+extern ssize_t _slurm_sendto (int __fd, __const void *__buf, size_t __n, 
+			      int __flags, struct sockaddr const * __addr, 
+			      socklen_t __addr_len)
 {
-        return sendto ( __fd , __buf , __n , __flags , __addr, __addr_len) ;
+	return sendto ( __fd , __buf , __n , __flags , __addr, __addr_len) ;
 }
 /* Read N bytes into BUF through socket FD.
  * If ADDR is not NULL, fill in *ADDR_LEN bytes of it with tha address of
  * the sender, and store the actual size of the address in *ADDR_LEN.
  * Returns the number of bytes read or -1 for errors.  */
 extern ssize_t _slurm_recvfrom (int __fd, void *__restrict __buf, 
-                                size_t __n, int __flags, 
-                                struct sockaddr * __addr, 
-                                socklen_t *__restrict __addr_len)
+				size_t __n, int __flags, 
+				struct sockaddr * __addr, 
+				socklen_t *__restrict __addr_len)
 {
-        return recvfrom ( __fd , __buf , __n , __flags , __addr, __addr_len) ;
+	return recvfrom ( __fd , __buf , __n , __flags , __addr, __addr_len) ;
 }
 
 /* Send a msg described MESSAGE on socket FD.
  * Returns the number of bytes sent, or -1 for errors.  */
 extern ssize_t _slurm_sendmsg (int __fd, __const struct msghdr *__msg, 
-                                int __flags)
+				int __flags)
 {
-        return sendmsg ( __fd , __msg , __flags ) ;
+	return sendmsg ( __fd , __msg , __flags ) ;
 }
 
 /* Send a msg described MESSAGE on socket FD.
  * Returns the number of bytes read or -1 for errors.  */
 extern ssize_t _slurm_recvmsg (int __fd, struct msghdr *__msg, int __flags)
 {
-        return recvmsg ( __fd , __msg , __flags );
+	return recvmsg ( __fd , __msg , __flags );
 }
 
 /* Put the current value for socket FD's option OPTNAME at protocol level LEVEL
  * into OPTVAL (which is *OPTLEN bytes long), and set *OPTLEN to the value's
  * actual length.  Returns 0 on success, -1 for errors.  */
 extern int _slurm_getsockopt (int __fd, int __level, int __optname, 
-                                void *__restrict __optval, 
-                                socklen_t *__restrict __optlen)
+				void *__restrict __optval, 
+				socklen_t *__restrict __optlen)
 {
-        return getsockopt ( __fd , __level , __optname , __optval , __optlen ) ;
+	return getsockopt ( __fd , __level , __optname , __optval , __optlen ) ;
 }
 
 /* Set socket FD's option OPTNAME at protocol level LEVEL
  * to *OPTVAL (which is OPTLEN bytes long).
  * Returns 0 on success, -1 for errors.  */
 extern int _slurm_setsockopt (int __fd, int __level, int __optname, 
-                                __const void *__optval, socklen_t __optlen)
+				__const void *__optval, socklen_t __optlen)
 {
-        return setsockopt ( __fd , __level , __optname , __optval , __optlen ) ;
+	return setsockopt ( __fd , __level , __optname , __optval , __optlen ) ;
 }
 
 
@@ -736,7 +766,7 @@ extern int _slurm_setsockopt (int __fd, int __level, int __optname,
  * Returns 0 on success, -1 for errors.  */
 extern int _slurm_listen (int __fd, int __n)
 {
-        return listen ( __fd , __n ) ;
+	return listen ( __fd , __n ) ;
 }
 
 /* Await a connection on socket FD.
@@ -745,9 +775,9 @@ extern int _slurm_listen (int __fd, int __n)
  * peer and *ADDR_LEN to the address's actual length, and return the
  * new socket's descriptor, or -1 for errors.  */
 extern int _slurm_accept (int __fd, struct sockaddr * __addr, 
-                                socklen_t *__restrict __addr_len)
+				socklen_t *__restrict __addr_len)
 {
-        return accept ( __fd , __addr , __addr_len ) ;
+	return accept ( __fd , __addr , __addr_len ) ;
 }
 
 /* Shut down all or part of the connection open on socket FD.
@@ -758,134 +788,134 @@ extern int _slurm_accept (int __fd, struct sockaddr * __addr,
  * Returns 0 on success, -1 for errors.  */
 extern int _slurm_shutdown (int __fd, int __how)
 {
-        return shutdown ( __fd , __how );
+	return shutdown ( __fd , __how );
 }
 
 extern int _slurm_close (int __fd )
 {
-        return close ( __fd ) ;
+	return close ( __fd ) ;
 }
 
 extern int _slurm_fcntl(int fd, int cmd, ... )
 {
-        int rc ;
-        va_list va ;
+	int rc ;
+	va_list va ;
 
-        va_start ( va , cmd ) ;
-        rc =_slurm_vfcntl ( fd , cmd , va ) ;
-        va_end ( va ) ;
-        return rc ;
+	va_start ( va , cmd ) ;
+	rc =_slurm_vfcntl ( fd , cmd , va ) ;
+	va_end ( va ) ;
+	return rc ;
 }
 
 extern int _slurm_vfcntl(int fd, int cmd, va_list va )
 {
-        long arg ;
-
-        switch ( cmd )
-        {
-                case F_GETFL :
-                        return fcntl ( fd , cmd ) ;
-                        break ;
-                case F_SETFL :
-                        arg = va_arg ( va , long ) ;
-                        return fcntl ( fd , cmd , arg) ;
-                        break ;
-                default :
-                        return SLURM_PROTOCOL_ERROR ;
-                        break ;
-        }
+	long arg ;
+
+	switch ( cmd )
+	{
+		case F_GETFL :
+			return fcntl ( fd , cmd ) ;
+			break ;
+		case F_SETFL :
+			arg = va_arg ( va , long ) ;
+			return fcntl ( fd , cmd , arg) ;
+			break ;
+		default :
+			return SLURM_PROTOCOL_ERROR ;
+			break ;
+	}
 }
 
 /* sets the fields of a slurm_addr */
 void _slurm_set_addr_uint (slurm_addr *addr, uint16_t port, uint32_t ipaddr)
 {
-        addr->sin_family      = AF_SLURM ;
-        addr->sin_port        = htons(port);
-        addr->sin_addr.s_addr = htonl(ipaddr);
+	addr->sin_family      = AF_SLURM ;
+	addr->sin_port	= htons(port);
+	addr->sin_addr.s_addr = htonl(ipaddr);
 }
 
 /* resets the address field of a slurm_addr, port and family are unchanged */
 void _reset_slurm_addr (slurm_addr *addr, slurm_addr new_addr)
 {
-        addr->sin_addr.s_addr = new_addr.sin_addr.s_addr;
+	addr->sin_addr.s_addr = new_addr.sin_addr.s_addr;
 }
 
 void _slurm_set_addr_char (slurm_addr * addr, uint16_t port, char *host)
 {
-        struct hostent * he    = NULL;
-        int              h_err = 0;
-        char *           h_buf[4096];
+	struct hostent * he    = NULL;
+	int	   h_err = 0;
+	char *	   h_buf[4096];
 
-        /* 
-         * If NULL hostname passed in, we only update the port
-         *   of addr
-         */
-        addr->sin_family = AF_SLURM;
-        addr->sin_port   = htons(port);
-        if (host == NULL)
-                return;
+	/* 
+	 * If NULL hostname passed in, we only update the port
+	 *   of addr
+	 */
+	addr->sin_family = AF_SLURM;
+	addr->sin_port   = htons(port);
+	if (host == NULL)
+		return;
 
-        he = get_host_by_name(host, (void *)&h_buf, sizeof(h_buf), &h_err);
+	he = get_host_by_name(host, (void *)&h_buf, sizeof(h_buf), &h_err);
 
-        if (he != NULL)
-                memcpy (&addr->sin_addr.s_addr, he->h_addr, he->h_length);
-        else {
-                error("Unable to resolve \"%s\": %s", host, hstrerror(h_err));
-                addr->sin_family = 0;
-                addr->sin_port = 0;
-        } 
-        return;
+	if (he != NULL)
+		memcpy (&addr->sin_addr.s_addr, he->h_addr, he->h_length);
+	else {
+		error("Unable to resolve \"%s\": %s", host, hstrerror(h_err));
+		addr->sin_family = 0;
+		addr->sin_port = 0;
+	} 
+	return;
 }
 
 void _slurm_get_addr (slurm_addr *addr, uint16_t *port, char *host, 
-                      unsigned int buflen )
+		      unsigned int buflen )
 {
-        struct hostent *he;
-        char   h_buf[4096];
-        int    h_err  = 0;
-        char * s_addr = (char *) &addr->sin_addr.s_addr;
-        int    len    = sizeof(addr->sin_addr.s_addr);
+	struct hostent *he;
+	char   h_buf[4096];
+	int    h_err  = 0;
+	char * s_addr = (char *) &addr->sin_addr.s_addr;
+	int    len    = sizeof(addr->sin_addr.s_addr);
 
-        he = get_host_by_addr( s_addr, len, AF_SLURM, 
-                               (void *) &h_buf, sizeof(h_buf), &h_err );
+	he = get_host_by_addr( s_addr, len, AF_SLURM, 
+			       (void *) &h_buf, sizeof(h_buf), &h_err );
 
-        if (he != NULL) {
-                *port = ntohs(addr->sin_port);
-                strncpy(host, he->h_name, buflen);
-        } else {
-                error("Lookup failed: %s", host_strerror(h_err));
-                *port = 0;
-                strncpy(host, "", buflen);
-        } 
-        return;
+	if (he != NULL) {
+		*port = ntohs(addr->sin_port);
+		strncpy(host, he->h_name, buflen);
+	} else {
+		error("Lookup failed: %s", host_strerror(h_err));
+		*port = 0;
+		strncpy(host, "", buflen);
+	} 
+	return;
 }
 
 void _slurm_print_slurm_addr ( slurm_addr * address, char *buf, size_t n )
 {
-        char addrbuf[INET_ADDRSTRLEN];
-        inet_ntop(AF_INET, &address->sin_addr, addrbuf, INET_ADDRSTRLEN);
-        /* warning: silently truncates */
-        snprintf(buf, n, "%s:%d", addrbuf, ntohs(address->sin_port));
+	char addrbuf[INET_ADDRSTRLEN];
+	inet_ntop(AF_INET, &address->sin_addr, addrbuf, INET_ADDRSTRLEN);
+	/* warning: silently truncates */
+	snprintf(buf, n, "%s:%d", addrbuf, ntohs(address->sin_port));
 }
-        
+	
 void _slurm_pack_slurm_addr(slurm_addr *addr, Buf buffer)
 {
-        pack32( ntohl( addr->sin_addr.s_addr ), buffer );
-        pack16( ntohs( addr->sin_port ), buffer );
+	pack32( ntohl( addr->sin_addr.s_addr ), buffer );
+	pack16( ntohs( addr->sin_port ), buffer );
 }
 
 int _slurm_unpack_slurm_addr_no_alloc(slurm_addr *addr, Buf buffer)
 {
-        addr->sin_family = AF_SLURM ;
-        safe_unpack32(&addr->sin_addr.s_addr, buffer);
-        safe_unpack16(&addr->sin_port, buffer);
+	addr->sin_family = AF_SLURM ;
+	safe_unpack32(&addr->sin_addr.s_addr, buffer);
+	safe_unpack16(&addr->sin_port, buffer);
 
-        addr->sin_addr.s_addr = htonl(addr->sin_addr.s_addr);
-        addr->sin_port = htons(addr->sin_port);
-        return SLURM_SUCCESS;
+	addr->sin_addr.s_addr = htonl(addr->sin_addr.s_addr);
+	addr->sin_port = htons(addr->sin_port);
+	return SLURM_SUCCESS;
 
     unpack_error:
-        return SLURM_ERROR;
+	return SLURM_ERROR;
 }
 
 /*
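Both _slurm_send_timeout() and _slurm_recv_timeout() above now inspect poll()'s revents before touching the socket. Taken by itself, that check is roughly the following; the helper name is an assumption for illustration.

#include <poll.h>

/* Invented helper: returns 1 when the descriptor is usable for the
 * requested event, 0 when poll() reported the peer is gone or errored. */
static int revents_usable(short revents, short wanted)
{
	if (revents & POLLERR)
		return 0;		/* hard socket error */
	if ((revents & POLLHUP) || (revents & POLLNVAL))
		return 0;		/* peer closed or bad descriptor */
	return ((revents & wanted) == wanted);
}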
diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c
index 7cec1b4967ea658bf67d026cc4f17743812363e9..3de583ca23c9410b8ad7fb39ca1ab568541956dd 100644
--- a/src/common/slurmdbd_defs.c
+++ b/src/common/slurmdbd_defs.c
@@ -360,6 +360,7 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req)
 	case DBD_GET_ACCOUNTS:
 	case DBD_GET_ASSOCS:
 	case DBD_GET_CLUSTERS:
+	case DBD_GET_JOBS_COND:
 	case DBD_GET_USERS:
 	case DBD_REMOVE_ACCOUNTS:
 	case DBD_REMOVE_ASSOCS:
@@ -475,6 +476,7 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp, Buf buffer)
 	case DBD_GET_ACCOUNTS:
 	case DBD_GET_ASSOCS:
 	case DBD_GET_CLUSTERS:
+	case DBD_GET_JOBS_COND:
 	case DBD_GET_USERS:
 	case DBD_REMOVE_ACCOUNTS:
 	case DBD_REMOVE_ASSOCS:
@@ -1208,7 +1210,10 @@ static int _purge_job_start_req(void)
 void inline slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg)
 {
 	if(msg) {
-		xfree(msg->acct);
+		if(msg->acct_list) {
+			list_destroy(msg->acct_list);
+			msg->acct_list = NULL;
+		}
 		destroy_acct_user_cond(msg->cond);
 		xfree(msg);
 	}
@@ -1240,6 +1245,9 @@ void inline slurmdbd_free_cond_msg(slurmdbd_msg_type_t type,
 		case DBD_REMOVE_CLUSTERS:
 			my_destroy = destroy_acct_cluster_cond;
 			break;
+		case DBD_GET_JOBS_COND:
+			my_destroy = destroy_acct_job_cond;
+			break;
 		case DBD_GET_USERS:
 		case DBD_REMOVE_USERS:
 			my_destroy = destroy_acct_user_cond;
@@ -1432,7 +1440,23 @@ void inline slurmdbd_free_usage_msg(slurmdbd_msg_type_t type,
 void inline
 slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg, Buf buffer)
 {
-	packstr(msg->acct, buffer);
+	char *acct = NULL;
+	ListIterator itr = NULL;
+	uint32_t count = 0;
+
+	if(msg->acct_list)
+		count = list_count(msg->acct_list);
+	
+	pack32(count, buffer);
+	if(count) {
+		itr = list_iterator_create(msg->acct_list);
+		while((acct = list_next(itr))) {
+			packstr(acct, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = 0;
+
 	pack_acct_user_cond(msg->cond, buffer);
 }
 
@@ -1440,12 +1464,25 @@ int inline
 slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg, Buf buffer)
 {
 	uint32_t uint32_tmp;
+	int i;
+	char *acct = NULL;
+	uint32_t count = 0;
 	dbd_acct_coord_msg_t *msg_ptr = xmalloc(sizeof(dbd_acct_coord_msg_t));
 	*msg = msg_ptr;
 
-	safe_unpackstr_xmalloc(&msg_ptr->acct, &uint32_tmp, buffer);
+	safe_unpack32(&count, buffer);
+	if(count) {
+		msg_ptr->acct_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&acct, &uint32_tmp, buffer);
+			list_append(msg_ptr->acct_list, acct);
+		}
+	}
+
 	if(unpack_acct_user_cond((void *)&msg_ptr->cond, buffer) == SLURM_ERROR)
 		goto unpack_error;
+	return SLURM_SUCCESS;
+
 unpack_error:
 	slurmdbd_free_acct_coord_msg(msg_ptr);
 	*msg = NULL;
@@ -1497,6 +1534,9 @@ void inline slurmdbd_pack_cond_msg(slurmdbd_msg_type_t type,
 	case DBD_REMOVE_CLUSTERS:
 		my_function = pack_acct_cluster_cond;
 		break;
+	case DBD_GET_JOBS_COND:
+		my_function = pack_acct_job_cond;
+		break;
 	case DBD_GET_USERS:
 	case DBD_REMOVE_USERS:
 		my_function = pack_acct_user_cond;
@@ -1528,6 +1568,9 @@ int inline slurmdbd_unpack_cond_msg(slurmdbd_msg_type_t type,
 	case DBD_REMOVE_CLUSTERS:
 		my_function = unpack_acct_cluster_cond;
 		break;
+	case DBD_GET_JOBS_COND:
+		my_function = unpack_acct_job_cond;
+		break;
 	case DBD_GET_USERS:
 	case DBD_REMOVE_USERS:
 		my_function = unpack_acct_user_cond;
@@ -2166,6 +2209,7 @@ slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg, Buf buffer)
 	pack32(msg->assoc_id, buffer);
 	pack32(msg->db_index, buffer);
 	pack_time(msg->end_time, buffer);
+	pack32(msg->exit_code, buffer);
 	jobacct_common_pack((struct jobacctinfo *)msg->jobacct, buffer);
 	pack32(msg->job_id, buffer);
 	pack32(msg->req_uid, buffer);
@@ -2183,6 +2227,7 @@ slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg, Buf buffer)
 	safe_unpack32(&msg_ptr->assoc_id, buffer);
 	safe_unpack32(&msg_ptr->db_index, buffer);
 	safe_unpack_time(&msg_ptr->end_time, buffer);
+	safe_unpack32(&msg_ptr->exit_code, buffer);
 	jobacct_common_unpack((struct jobacctinfo **)&msg_ptr->jobacct, buffer);
 	safe_unpack32(&msg_ptr->job_id, buffer);
 	safe_unpack32(&msg_ptr->req_uid, buffer);
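
For reference, the new acct_list field travels as a count-prefixed list of strings: pack32() writes how many account names follow, each name is then packstr()'d, and the unpack side rebuilds the List from that prefix. The fragment below is only an illustrative sketch of that layout in portable C (plain stdio instead of SLURM's Buf helpers; write_acct_list/read_acct_list are made-up names), not the project's actual pack API.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Write a count prefix, then each string as <length><bytes incl. NUL>. */
static void write_acct_list(FILE *fp, char **accts, uint32_t count)
{
	uint32_t i, n = htonl(count);

	fwrite(&n, sizeof(n), 1, fp);
	for (i = 0; i < count; i++) {
		uint32_t len = strlen(accts[i]) + 1;
		uint32_t nlen = htonl(len);

		fwrite(&nlen, sizeof(nlen), 1, fp);
		fwrite(accts[i], len, 1, fp);
	}
}

/* Read the count prefix back, then allocate and fill each string. */
static char **read_acct_list(FILE *fp, uint32_t *count)
{
	uint32_t i, n, len;
	char **accts;

	if (fread(&n, sizeof(n), 1, fp) != 1)
		return NULL;
	n = ntohl(n);
	accts = calloc(n, sizeof(char *));
	for (i = 0; i < n; i++) {
		if (fread(&len, sizeof(len), 1, fp) != 1)
			break;
		len = ntohl(len);
		accts[i] = malloc(len);
		if (fread(accts[i], len, 1, fp) != 1)
			break;
	}
	*count = n;
	return accts;
}

int main(void)
{
	char *names[] = { "physics", "chemistry" };
	char **out;
	uint32_t i, count = 0;
	FILE *fp = tmpfile();

	write_acct_list(fp, names, 2);
	rewind(fp);
	out = read_acct_list(fp, &count);
	for (i = 0; i < count; i++)
		printf("acct[%u] = %s\n", i, out[i]);
	return 0;
}

Compiled standalone, this round-trips two account names through a temporary file, which is the same shape of data slurmdbd_pack_acct_coord_msg() now puts on the wire.
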
diff --git a/src/common/slurmdbd_defs.h b/src/common/slurmdbd_defs.h
index 9e3ff3f7aad3cba3ffed536bfa8e80a941cacff0..c75cd084295bb7c5a8170cd9a73439f730032628 100644
--- a/src/common/slurmdbd_defs.h
+++ b/src/common/slurmdbd_defs.h
@@ -99,7 +99,7 @@ typedef enum {
 	DBD_RC,			/* Return code from operation		*/
 	DBD_REGISTER_CTLD,	/* Register a slurmctld's comm port	*/
 	DBD_REMOVE_ACCOUNTS,    /* Remove existing account              */
-	DBD_REMOVE_ACCOUNT_COORDS,/* Remove existing coordinatior from
+	DBD_REMOVE_ACCOUNT_COORDS,/* Remove existing coordinator from
 				   * an account */
 	DBD_REMOVE_ASSOCS,      /* Remove existing association          */
 	DBD_REMOVE_CLUSTERS,    /* Remove existing cluster              */
@@ -107,7 +107,8 @@ typedef enum {
 	DBD_ROLL_USAGE,         /* Roll up usage                        */
 	DBD_STEP_COMPLETE,	/* Record step completion		*/
 	DBD_STEP_START,		/* Record step starting			*/
-	DBD_UPDATE_SHARES_USED	/* Record current share usage		*/
+	DBD_UPDATE_SHARES_USED,	/* Record current share usage		*/
+	DBD_GET_JOBS_COND 	/* Get job information with a condition */
 } slurmdbd_msg_type_t;
 
 /*****************************************************************************\
@@ -120,7 +121,7 @@ typedef struct slurmdbd_msg {
 } slurmdbd_msg_t;
 
 typedef struct {
-	char *acct;
+	List acct_list; /* list of account names (char *'s) */
 	acct_user_cond_t *cond;
 } dbd_acct_coord_msg_t;
 
@@ -258,6 +259,7 @@ typedef struct dbd_step_comp_msg {
 	uint32_t assoc_id;	/* accounting association id */
 	uint32_t db_index;	/* index into the db for this job */
 	time_t   end_time;	/* job termintation time */
+	uint32_t exit_code;	/* job exit code or signal */
 	jobacctinfo_t *jobacct; /* status info */
 	uint32_t job_id;	/* job ID */
 	uint32_t req_uid;	/* requester user ID */
diff --git a/src/common/stepd_api.c b/src/common/stepd_api.c
index 88374de26340dc2f4269b45bed6779eff928363f..038748c6c1750ac25db0c838cc043fa9c71819c9 100644
--- a/src/common/stepd_api.c
+++ b/src/common/stepd_api.c
@@ -1,8 +1,10 @@
 /*****************************************************************************\
  *  src/common/stepd_api.c - slurmstepd message API
- *  $Id: stepd_api.c 13695 2008-03-21 21:28:17Z jette $
+ *  $Id: stepd_api.c 14314 2008-06-23 20:57:56Z jette $
  *****************************************************************************
- *  Copyright (C) 2005 The Regents of the University of California.
+ *  Copyright (C) 2005-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2008 Vijay Ramasubramanian
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher Morrone <morrone2@llnl.gov>
  *  LLNL-CODE-402394.
@@ -169,7 +171,9 @@ _guess_nodename()
 		return NULL;
 
 	nodename = slurm_conf_get_nodename(host);
-	if (nodename == NULL) /* no match?  lets try localhost */
+	if (nodename == NULL)
+		nodename = slurm_conf_get_aliased_nodename();
+	if (nodename == NULL) /* if no match, try localhost */
 		nodename = slurm_conf_get_nodename("localhost");
 
 	return nodename;
diff --git a/src/common/switch.c b/src/common/switch.c
index e3cc7fdf6eed9c6349eb5879cfc1946bb5b98f7d..0a111f9a5dad98a3a5e4686e2d54b39561f408b6 100644
--- a/src/common/switch.c
+++ b/src/common/switch.c
@@ -168,6 +168,8 @@ _slurm_switch_context_destroy( slurm_switch_context_t c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->switch_type );
@@ -226,6 +228,16 @@ _slurm_switch_get_ops( slurm_switch_context_t c )
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
+	/* Find the correct plugin. */
+	c->cur_plugin = plugin_load_and_link(c->switch_type, n_syms, syms,
+					     (void **) &c->ops);
+	if ( c->cur_plugin != PLUGIN_INVALID_HANDLE )
+		return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s; "
+	      "scanning all files in the plugin directory",
+	      c->switch_type);
+	
 	/* Get the plugin list, if needed. */
 	if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
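
The switch.c hunk above implements the "smarter loading of plugins" item from the release notes: try to load the plugin directly by its type name first, and only fall back to plugrack's directory scan when that fails. Below is a minimal sketch of that strategy, assuming a dlopen()-style loader and a made-up path layout; SLURM's real code goes through plugin_load_and_link().

#include <dlfcn.h>
#include <stdio.h>

/* Try "<plugin_dir>/<type>.so" directly; NULL return means "scan the dir". */
static void *load_plugin_by_name(const char *plugin_dir, const char *type)
{
	char path[4096];
	void *handle;

	snprintf(path, sizeof(path), "%s/%s.so", plugin_dir, type);
	handle = dlopen(path, RTLD_LAZY | RTLD_GLOBAL);
	if (handle == NULL)
		fprintf(stderr, "direct load of %s failed (%s), "
			"falling back to a directory scan\n",
			path, dlerror());
	return handle;
}

int main(void)
{
	/* "/usr/lib/slurm" and "switch_none" are placeholder values. */
	void *h = load_plugin_by_name("/usr/lib/slurm", "switch_none");

	return h ? 0 : 1;
}

The point of the design is simply to avoid stat()ing every file in the plugin directory on the common path where the plugin name is already known.
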
diff --git a/src/database/mysql_common.c b/src/database/mysql_common.c
index 24e4de9ec8f25acfe37af0d77a0fc244c023d290..35847b0a2130590ff60fbcf7707fe80bb4102722 100644
--- a/src/database/mysql_common.c
+++ b/src/database/mysql_common.c
@@ -146,12 +146,22 @@ static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name,
 			}
 		}
 		if(!found) {
-			info("adding column %s after %s", fields[i].name,
-			     fields[i-1].name);
-			xstrfmtcat(query, " add %s %s after %s,",
-				   fields[i].name,
-				   fields[i].options,
-				   fields[i-1].name);
+			if(i) {
+				info("adding column %s after %s",
+				     fields[i].name,
+				     fields[i-1].name);
+				xstrfmtcat(query, " add %s %s after %s,",
+					   fields[i].name,
+					   fields[i].options,
+					   fields[i-1].name);
+			} else {
+				info("adding column %s at the beginning",
+				     fields[i].name);
+				/* first field: nothing to anchor "after" */
+				xstrfmtcat(query, " add %s %s first,",
+					   fields[i].name,
+					   fields[i].options);
+			}
 		}
 
 		i++;
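
The mysql_common.c change above distinguishes the first column from later ones when repairing a table: MySQL's ALTER TABLE needs "... FIRST" when there is no preceding column to say "AFTER". A small, self-contained sketch of that clause selection (plain snprintf standing in for xstrfmtcat()):

#include <stdio.h>

/* Pick "first" for the leading column, "after <prev>" for the rest. */
static void add_column_clause(char *buf, size_t len, int idx,
			      const char *name, const char *options,
			      const char *prev_name)
{
	if (idx == 0)
		snprintf(buf, len, " add %s %s first,", name, options);
	else
		snprintf(buf, len, " add %s %s after %s,",
			 name, options, prev_name);
}

int main(void)
{
	char clause[256];

	add_column_clause(clause, sizeof(clause), 0,
			  "creation_time", "int unsigned not null", NULL);
	printf("%s\n", clause);
	add_column_clause(clause, sizeof(clause), 2,
			  "deleted", "tinyint default 0", "mod_time");
	printf("%s\n", clause);
	return 0;
}
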
diff --git a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
index 1e95e1baea91e962b6b6e912bafa40f76fe1f0ae..a2a5d5177ad9b5ffd8bb32df5ea4414e7f0c64df 100644
--- a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
+++ b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
@@ -251,7 +251,7 @@ extern int acct_storage_p_add_users(void *db_conn, uint32_t uid,
 }
 
 extern int acct_storage_p_add_coord(void *db_conn, uint32_t uid,
-				    char *acct, acct_user_cond_t *user_q)
+				    List acct_list, acct_user_cond_t *user_q)
 {
 	return SLURM_SUCCESS;
 }
@@ -309,7 +309,8 @@ extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 }
 
 extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
-				       char *acct, acct_user_cond_t *user_q)
+					List acct_list, 
+					acct_user_cond_t *user_q)
 {
 	return SLURM_SUCCESS;
 }
@@ -805,6 +806,37 @@ extern List jobacct_storage_p_get_jobs(void *db_conn,
 						params);
 }
 
+/* 
+ * get info from the storage 
+ * returns List of jobacct_job_rec_t *
+ * note: the returned List needs to be freed by the caller
+ */
+extern List jobacct_storage_p_get_jobs_cond(void *db_conn,
+					    acct_job_cond_t *job_cond)
+{
+	List job_list = NULL;
+	sacct_parameters_t params;
+
+	memset(&params, 0, sizeof(sacct_parameters_t));
+	params.opt_uid = -1;
+
+	if(job_cond->cluster_list && list_count(job_cond->cluster_list)) {
+		params.opt_cluster = list_pop(job_cond->cluster_list);
+	}
+	if(job_cond->user_list && list_count(job_cond->user_list)) {
+		char *user = list_pop(job_cond->user_list);
+		struct passwd *pw = NULL;
+		if ((pw=getpwnam(user)))
+			params.opt_uid = pw->pw_uid;
+		xfree(user);
+	}
+
+	job_list = filetxt_jobacct_process_get_jobs(job_cond->step_list, 
+						    job_cond->partition_list,
+						    &params);
+	if(params.opt_cluster)
+		xfree(params.opt_cluster);
+	return job_list;
+}
+
 /* 
  * expire old info from the storage 
  */
diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
index a9d0f7470d6fdc6ff776405baebf2b4e6eaa2a7b..3711582c943823514f95976135b3aad49f9e74ea 100644
--- a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
+++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
@@ -299,6 +299,7 @@ static jobacct_job_rec_t *_create_jobacct_job_rec(
 	jobacct_job->priority = filetxt_job->priority;
 	jobacct_job->requid = filetxt_job->requid;
 	memcpy(&jobacct_job->sacct, &filetxt_job->sacct, sizeof(sacct_t));
+	jobacct_job->show_full = filetxt_job->show_full;
 	jobacct_job->start = filetxt_job->header.timestamp -
 		jobacct_job->elapsed;
 	jobacct_job->state = filetxt_job->status;
diff --git a/src/plugins/accounting_storage/gold/accounting_storage_gold.c b/src/plugins/accounting_storage/gold/accounting_storage_gold.c
index 1697520cdedaa08710828d1737ec7664b98a9372..3c5fca395af952c35a180f665c3756ba42081d5a 100644
--- a/src/plugins/accounting_storage/gold/accounting_storage_gold.c
+++ b/src/plugins/accounting_storage/gold/accounting_storage_gold.c
@@ -3240,6 +3240,18 @@ extern List jobacct_storage_p_get_jobs(void *db_conn,
 	return job_list;
 }
 
+/* 
+ * get info from the storage 
+ * returns List of jobacct_job_rec_t *
+ * note: the returned List needs to be freed by the caller
+ */
+extern List jobacct_storage_p_get_jobs_cond(void *db_conn,
+					    void *job_cond)
+{
+	info("not implemented");
+	return NULL;
+}
+
 /* 
  * expire old info from the storage 
  */
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index 21aeaf65d5072b2e53af514cc0525008f097b653..2e3ea135692df0275443b2b5a5e9ff97c94c1880 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -117,9 +117,36 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit);
 extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 					   uint32_t uid, 
 					   List association_list);
+
 extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn, 
 					    acct_association_cond_t *assoc_q);
 
+extern int acct_storage_p_get_usage(mysql_conn_t *mysql_conn,
+				    acct_association_rec_t *acct_assoc,
+				    time_t start, time_t end);
+
+extern int clusteracct_storage_p_get_usage(
+	mysql_conn_t *mysql_conn,
+	acct_cluster_rec_t *cluster_rec, time_t start, time_t end);
+
+
+static int _check_connection(mysql_conn_t *mysql_conn)
+{
+	if(!mysql_conn) {
+		error("We need a connection to run this");
+		return SLURM_ERROR;
+	} else if(!mysql_conn->acct_mysql_db
+		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
+		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
+					   mysql_db_name, mysql_db_info)
+			   != SLURM_SUCCESS) {
+			error("unable to re-connect to mysql database");
+			return SLURM_ERROR;
+		}
+	}
+	return SLURM_SUCCESS;
+}
+
 /* This function will take the object given and free it later so it
  * needed to be removed from a list if in one before 
  */
@@ -154,6 +181,8 @@ static int _addto_update_list(List update_list, acct_update_type_t type,
 	case ACCT_MODIFY_USER:
 	case ACCT_ADD_USER:
 	case ACCT_REMOVE_USER:
+	case ACCT_ADD_COORD:
+	case ACCT_REMOVE_COORD:
 		update_object->objects = list_create(destroy_acct_user_rec);
 		break;
 	case ACCT_ADD_ASSOC:
@@ -175,26 +204,6 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 			 char *cluster,
 			 char *id, char *parent)
 {
-/*
-  tested sql...
-
-  SELECT @parLeft := lft from assoc_table where cluster='name' && acct='new parent' && user='';
-
-  SELECT @oldLeft := lft, @oldRight := rgt, @myWidth := (rgt - lft + 1), @myDiff := (@parLeft+1) - lft FROM assoc_table WHERE id = 'account id';
-
-  update assoc_table set deleted = deleted + 2, lft = lft + @myDiff, rgt = rgt + @myDiff WHERE lft BETWEEN @oldLeft AND @oldRight;
-
-  UPDATE assoc_table SET rgt = rgt + @myWidth WHERE rgt > @parLeft && deleted < 2;
-  UPDATE assoc_table SET lft = lft + @myWidth WHERE lft > @parLeft && deleted < 2;
-
-  UPDATE assoc_table SET rgt = rgt - @myWidth WHERE (@myDiff < 0 && rgt > @oldRight && deleted < 2) || (@myDiff >= 0 && rgt > @oldLeft);
-  UPDATE assoc_table SET lft = lft - @myWidth WHERE (@myDiff < 0 && lft > @oldRight && deleted < 2) || (@myDiff >= 0 && lft > @oldLeft);
-
-  update assoc_table set deleted = deleted - 2 WHERE deleted > 1;
-	   
-  update assoc_table set parent_acct='new parent' where id = 'account id';
-
-*/
 	int rc = SLURM_SUCCESS;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
@@ -398,6 +407,7 @@ static int _modify_common(mysql_conn_t *mysql_conn,
 
 	return SLURM_SUCCESS;
 }
+
 static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			       acct_association_rec_t *assoc,
 			       char *acct,
@@ -593,6 +603,9 @@ static int _remove_common(mysql_conn_t *mysql_conn,
 		
 		return SLURM_ERROR;
 	}
+	
+	if(table == acct_coord_table)
+		return SLURM_SUCCESS;
 
 	/* mark deleted=1 or remove completely the
 	   accounting tables
@@ -787,6 +800,86 @@ static int _remove_common(mysql_conn_t *mysql_conn,
 	return rc;
 }
 
+static int _get_user_coords(mysql_conn_t *mysql_conn, acct_user_rec_t *user)
+{
+	char *query = NULL;
+	acct_coord_rec_t *coord = NULL;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	ListIterator itr = NULL;
+
+	if(!user) {
+		error("We need a user to fill in.");
+		return SLURM_ERROR;
+	}
+
+	if(!user->coord_accts)
+		user->coord_accts = list_create(destroy_acct_coord_rec);
+			
+	query = xstrdup_printf(
+		"select acct from %s where user='%s' && deleted=0",
+		acct_coord_table, user->name);
+			
+	if(!(result =
+	     mysql_db_query_ret(mysql_conn->acct_mysql_db, query, 0))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+	while((row = mysql_fetch_row(result))) {
+		coord = xmalloc(sizeof(acct_coord_rec_t));
+		list_append(user->coord_accts, coord);
+		coord->acct_name = xstrdup(row[0]);
+		coord->sub_acct = 0;
+		if(query) 
+			xstrcat(query, " || ");
+		else 
+			query = xstrdup_printf(
+				"select distinct t1.acct from "
+				"%s as t1, %s as t2 where ",
+				assoc_table, assoc_table);
+		/* Make sure we don't get the same
+		 * account back since we want to keep
+		 * track of the sub-accounts.
+		 */
+		xstrfmtcat(query, "(t2.acct='%s' "
+			   "&& t1.lft between t2.lft "
+			   "and t2.rgt && t1.user='' "
+			   "&& t1.acct!='%s')",
+			   coord->acct_name, coord->acct_name);
+	}
+	mysql_free_result(result);
+
+	if(query) {
+		if(!(result = mysql_db_query_ret(
+			     mysql_conn->acct_mysql_db, query, 0))) {
+			xfree(query);
+			return SLURM_ERROR;
+		}
+		xfree(query);
+
+		itr = list_iterator_create(user->coord_accts);
+		while((row = mysql_fetch_row(result))) {
+
+			while((coord = list_next(itr))) {
+				if(!strcmp(coord->acct_name, row[0]))
+					break;
+			}
+			list_iterator_reset(itr);
+			if(coord) 
+				continue;
+					
+			coord = xmalloc(sizeof(acct_coord_rec_t));
+			list_append(user->coord_accts, coord);
+			coord->acct_name = xstrdup(row[0]);
+			coord->sub_acct = 1;
+		}
+		list_iterator_destroy(itr);
+		mysql_free_result(result);
+	}
+	return SLURM_SUCCESS;
+}
+
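+
The _get_user_coords() function above collects sub-accounts with a nested-set query: an association belongs to a coordinator's subtree when its lft falls between the coordinator account's lft and rgt. The standalone sketch below builds the same OR-joined condition string for a couple of hypothetical accounts, just to show the shape of the SQL; the table and column names are taken from the code above.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical accounts the user directly coordinates. */
	const char *coord_accts[] = { "physics", "chemistry" };
	size_t i, n = sizeof(coord_accts) / sizeof(coord_accts[0]);
	char query[1024] =
		"select distinct t1.acct from assoc_table as t1, "
		"assoc_table as t2 where ";

	for (i = 0; i < n; i++) {
		char term[256];

		snprintf(term, sizeof(term),
			 "%s(t2.acct='%s' && t1.lft between t2.lft and t2.rgt"
			 " && t1.user='' && t1.acct!='%s')",
			 i ? " || " : "", coord_accts[i], coord_accts[i]);
		strncat(query, term, sizeof(query) - strlen(query) - 1);
	}
	printf("%s\n", query);
	return 0;
}
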
 static int _get_db_index(MYSQL *acct_mysql_db, 
 			 time_t submit, uint32_t jobid, uint32_t associd)
 {
@@ -833,6 +926,8 @@ static int _mysql_acct_check_tables(MYSQL *acct_mysql_db)
 {
 	int rc = SLURM_SUCCESS;
 	storage_field_t acct_coord_table_fields[] = {
+		{ "creation_time", "int unsigned not null" },
+		{ "mod_time", "int unsigned default 0 not null" },
 		{ "deleted", "tinyint default 0" },
 		{ "acct", "tinytext not null" },
 		{ "user", "tinytext not null" },
@@ -1206,6 +1301,7 @@ extern int init ( void )
 	rc = _mysql_acct_check_tables(acct_mysql_db);
 
 	mysql_close_db_connection(&acct_mysql_db);
+	
 #endif		
 
 	if(rc == SLURM_SUCCESS)
@@ -1273,19 +1369,8 @@ extern int acct_storage_p_close_connection(mysql_conn_t **mysql_conn)
 extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 {
 #ifdef HAVE_MYSQL
-	
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	debug4("got %d commits", list_count(mysql_conn->update_list));
 
@@ -1373,6 +1458,8 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 			case ACCT_MODIFY_USER:
 			case ACCT_ADD_USER:
 			case ACCT_REMOVE_USER:
+			case ACCT_ADD_COORD:
+			case ACCT_REMOVE_COORD:
 				rc = assoc_mgr_update_local_users(object);
 				break;
 			case ACCT_ADD_ASSOC:
@@ -1414,18 +1501,8 @@ extern int acct_storage_p_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	int affect_rows = 0;
 	List assoc_list = list_create(destroy_acct_association_rec);
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	if((pw=getpwuid(uid))) {
 		user = pw->pw_name;
@@ -1534,9 +1611,91 @@ extern int acct_storage_p_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 }
 
 extern int acct_storage_p_add_coord(mysql_conn_t *mysql_conn, uint32_t uid, 
-				    char *acct, acct_user_cond_t *user_q)
+				    List acct_list, acct_user_cond_t *user_q)
 {
 #ifdef HAVE_MYSQL
+	char *query = NULL, *user = NULL, *acct = NULL;
+	char *user_name = NULL, *txn_query = NULL;
+	struct passwd *pw = NULL;
+	ListIterator itr, itr2;
+	time_t now = time(NULL);
+	int rc = SLURM_SUCCESS;
+	acct_user_rec_t *user_rec = NULL;
+	
+	if(!user_q || !user_q->user_list || !list_count(user_q->user_list) 
+	   || !acct_list || !list_count(acct_list)) {
+		error("we need something to add");
+		return SLURM_ERROR;
+	}
+
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
+		return SLURM_ERROR;
+
+	if((pw=getpwuid(uid))) {
+		user_name = pw->pw_name;
+	}
+
+	itr = list_iterator_create(user_q->user_list);
+	itr2 = list_iterator_create(acct_list);
+	while((user = list_next(itr))) {
+		while((acct = list_next(itr2))) {
+			if(query) 
+				xstrfmtcat(query, ", (%d, %d, '%s', '%s')",
+					   now, now, acct, user);
+			else
+				query = xstrdup_printf(
+					"insert into %s (creation_time, "
+					"mod_time, acct, user) values "
+					"(%d, %d, '%s', '%s')",
+					acct_coord_table, 
+					now, now, acct, user); 
+
+			if(txn_query)
+				xstrfmtcat(txn_query, 	
+					   ", (%d, %u, '%s', '%s', '%s')",
+					   now, DBD_ADD_ACCOUNT_COORDS, user,
+					   user_name, acct);
+			else
+				xstrfmtcat(txn_query, 	
+					   "insert into %s "
+					   "(timestamp, action, name, "
+					   "actor, info) "
+					   "values (%d, %u, '%s', '%s', '%s')",
+					   txn_table,
+					   now, DBD_ADD_ACCOUNT_COORDS, user,
+					   user_name, acct);
+		}
+		list_iterator_reset(itr2);
+	}
+	list_iterator_destroy(itr);
+	list_iterator_destroy(itr2);
+
+
+	if(query) {
+		xstrfmtcat(query, 
+			   " on duplicate key update mod_time=%d, deleted=0;%s",
+			   now, txn_query);
+		debug3("%d query\n%s", mysql_conn->conn, query);
+		rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
+		xfree(query);
+		xfree(txn_query);
+		
+		if(rc != SLURM_SUCCESS) {
+			error("Couldn't add account coordinators");
+			return rc;
+		}
+		/* get the update list set */
+		itr = list_iterator_create(user_q->user_list);
+		while((user = list_next(itr))) {
+			user_rec = xmalloc(sizeof(acct_user_rec_t));
+			user_rec->name = xstrdup(user);
+			_get_user_coords(mysql_conn, user_rec);
+			_addto_update_list(mysql_conn->update_list, 
+					   ACCT_ADD_COORD, user_rec);
+		}
+		list_iterator_destroy(itr);
+	}
+	
 	return SLURM_SUCCESS;
 #else
 	return SLURM_ERROR;
@@ -1558,18 +1717,8 @@ extern int acct_storage_p_add_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	int affect_rows = 0;
 	List assoc_list = list_create(destroy_acct_association_rec);
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	if((pw=getpwuid(uid))) {
 		user = pw->pw_name;
@@ -1688,18 +1837,8 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	char *user = NULL;
 	int affect_rows = 0;
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	if((pw=getpwuid(uid))) {
 		user = pw->pw_name;
@@ -1896,18 +2035,8 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	if((pw=getpwuid(uid))) {
 		user = pw->pw_name;
@@ -2275,15 +2404,8 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		return NULL;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(!mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					    mysql_db_name, mysql_db_info))
-			return NULL;
-	}
 
 	if((pw=getpwuid(uid))) {
 		user_name = pw->pw_name;
@@ -2346,7 +2468,6 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		xfree(query);
 		return NULL;
 	}
-	xfree(query);
 
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
@@ -2363,10 +2484,13 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	mysql_free_result(result);
 
 	if(!list_count(ret_list)) {
-		debug3("didn't effect anything");
+		errno = SLURM_NO_CHANGE_IN_DATA;
+		debug3("didn't affect anything\n%s", query);
 		xfree(vals);
+		xfree(query);
 		return ret_list;
 	}
+	xfree(query);
 	xstrcat(name_char, ")");
 
 	if(_modify_common(mysql_conn, DBD_MODIFY_USERS, now,
@@ -2408,18 +2532,8 @@ extern List acct_storage_p_modify_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		return NULL;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
 	if((pw=getpwuid(uid))) {
 		user = pw->pw_name;
@@ -2493,7 +2607,6 @@ extern List acct_storage_p_modify_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		xfree(vals);
 		return NULL;
 	}
-	xfree(query);
 
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
@@ -2511,10 +2624,13 @@ extern List acct_storage_p_modify_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	mysql_free_result(result);
 
 	if(!list_count(ret_list)) {
-		debug3("didn't effect anything");
+		errno = SLURM_NO_CHANGE_IN_DATA;
+		debug3("didn't affect anything\n%s", query);
+		xfree(query);
 		xfree(vals);
 		return ret_list;
 	}
+	xfree(query);
 	xstrcat(name_char, ")");
 
 	if(_modify_common(mysql_conn, DBD_MODIFY_ACCOUNTS, now,
@@ -2563,18 +2679,8 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		return NULL;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
 	if((pw=getpwuid(uid))) {
 		user = pw->pw_name;
@@ -2618,7 +2724,6 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		error("no result given for %s", extra);
 		return NULL;
 	}
-	xfree(query);
 	
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
@@ -2635,10 +2740,13 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 	mysql_free_result(result);
 
 	if(!list_count(ret_list)) {
-		debug3("didn't effect anything");
+		errno = SLURM_NO_CHANGE_IN_DATA;
+		debug3("didn't affect anything\n%s", query);
 		xfree(vals);
+		xfree(query);
 		return ret_list;
 	}
+	xfree(query);
 
 	if(vals) {
 		send_char = xstrdup_printf("(%s)", name_char);
@@ -2677,10 +2785,12 @@ extern List acct_storage_p_modify_associations(mysql_conn_t *mysql_conn,
 	char *vals = NULL, *extra = NULL, *query = NULL, *name_char = NULL;
 	time_t now = time(NULL);
 	struct passwd *pw = NULL;
-	char *user = NULL;
-	int set = 0, i = 0;
+	char *user_name = NULL;
+	int set = 0, i = 0, is_admin=0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
+	acct_user_rec_t user;
+
 	char *massoc_req_inx[] = {
 		"id",
 		"acct",
@@ -2709,21 +2819,50 @@ extern List acct_storage_p_modify_associations(mysql_conn_t *mysql_conn,
 		return NULL;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
+
+	memset(&user, 0, sizeof(acct_user_rec_t));
+	user.uid = uid;
+
+	/* This only works when running through the slurmdbd.
+	 * THERE IS NO AUTHENTICATION WHEN RUNNING OUTSIDE OF THE
+	 * SLURMDBD!
+	 */
+	if(slurmdbd_conf) {
+		/* we have to check the authentication here in the
+		 * plugin since we don't know what accounts are being
+		 * referenced until after the query.  Here we will
+		 * set if they are an operator or greater and then
+		 * check it below after the query.
+		 */
+		if(uid == slurmdbd_conf->slurm_user_id
+		   || assoc_mgr_get_admin_level(mysql_conn, uid) 
+		   >= ACCT_ADMIN_OPERATOR) 
+			is_admin = 1;	
+		else {
+			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1)
 			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
+				error("couldn't get information for this user");
+				errno = SLURM_ERROR;
+				return NULL;
+			}
+			if(!user.coord_accts || !list_count(user.coord_accts)) {
+				error("This user doesn't have any "
+				      "coordinator abilities");
+				errno = ESLURM_ACCESS_DENIED;
+				return NULL;
+			}
 		}
+	} else {
+		/* Setting this here just makes it easier down below
+		 * since user will not be filled in.
+		 */
+		is_admin = 1;
 	}
 
 	if((pw=getpwuid(uid))) {
-		user = pw->pw_name;
+		user_name = pw->pw_name;
 	}
 
 	if(assoc_q->acct_list && list_count(assoc_q->acct_list)) {
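
The block above is the authorization pattern repeated through the modify/remove paths: inside the slurmdbd, the slurm user and operators/admins may touch anything, while other callers are limited to accounts they coordinate; outside the slurmdbd no check is made at all. A toy sketch of that gate with made-up types follows (the real code consults assoc_mgr_fill_in_user() and user.coord_accts):

#include <stdbool.h>
#include <stdio.h>
#include <strings.h>

struct caller {
	const char *name;
	bool is_operator;		/* slurm user, operator or admin */
	const char **coord_accts;	/* NULL-terminated account names */
};

static bool may_modify_acct(const struct caller *c, const char *acct,
			    bool via_slurmdbd)
{
	size_t i;

	if (!via_slurmdbd)	/* no authentication outside the slurmdbd */
		return true;
	if (c->is_operator)
		return true;
	for (i = 0; c->coord_accts && c->coord_accts[i]; i++) {
		if (strcasecmp(c->coord_accts[i], acct) == 0)
			return true;
	}
	return false;
}

int main(void)
{
	const char *accts[] = { "physics", NULL };
	struct caller joe = { "joe", false, accts };

	printf("physics:   %s\n",
	       may_modify_acct(&joe, "physics", true) ? "allowed" : "denied");
	printf("chemistry: %s\n",
	       may_modify_acct(&joe, "chemistry", true) ? "allowed" : "denied");
	return 0;
}
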
@@ -2851,6 +2990,33 @@ extern List acct_storage_p_modify_associations(mysql_conn_t *mysql_conn,
 		int account_type=0;
 /* 		MYSQL_RES *result2 = NULL; */
 /* 		MYSQL_ROW row2; */
+
+		if(!is_admin) {
+			acct_coord_rec_t *coord = NULL;
+			if(!user.coord_accts) {
+				/* this should never happen */
+				error("We are here with no coord accts");
+				errno = ESLURM_ACCESS_DENIED;
+				mysql_free_result(result);
+				xfree(vals);
+				list_destroy(ret_list);
+				return NULL;
+			}
+			itr = list_iterator_create(user.coord_accts);
+			while((coord = list_next(itr))) {
+				if(!strcasecmp(coord->acct_name, row[1]))
+					break;
+			}
+			list_iterator_destroy(itr);
+
+			if(!coord) {
+				error("User %s(%d) does not have the "
+				      "ability to change this account (%s)",
+				      user.name, user.uid, row[1]);
+				continue;
+			}
+		}
+
 		if(row[MASSOC_PART][0]) { 
 			// see if there is a partition name
 			object = xstrdup_printf(
@@ -2946,6 +3112,7 @@ extern List acct_storage_p_modify_associations(mysql_conn_t *mysql_conn,
 
 
 	if(!list_count(ret_list)) {
+		errno = SLURM_NO_CHANGE_IN_DATA;
 		debug3("didn't effect anything");
 		xfree(vals);
 		return ret_list;
@@ -2954,7 +3121,7 @@ extern List acct_storage_p_modify_associations(mysql_conn_t *mysql_conn,
 
 	if(vals) {
 		if(_modify_common(mysql_conn, DBD_MODIFY_ASSOCS, now,
-				  user, assoc_table, name_char, vals)
+				  user_name, assoc_table, name_char, vals)
 		   == SLURM_ERROR) {
 			error("Couldn't modify associations");
 			list_destroy(ret_list);
@@ -2995,18 +3162,8 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		return NULL;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
 	if((pw=getpwuid(uid))) {
 		user_name = pw->pw_name;
@@ -3062,7 +3219,6 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		xfree(query);
 		return NULL;
 	}
-	xfree(query);
 
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
@@ -3081,10 +3237,13 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	mysql_free_result(result);
 
 	if(!list_count(ret_list)) {
-		debug3("didn't effect anything");
+		errno = SLURM_NO_CHANGE_IN_DATA;
+		debug3("didn't affect anything\n%s", query);
+		xfree(query);
 		return ret_list;
 	}
-	
+	xfree(query);
+
 	if(_remove_common(mysql_conn, DBD_REMOVE_USERS, now,
 			  user_name, user_table, name_char, assoc_char)
 	   == SLURM_ERROR) {
@@ -3094,8 +3253,20 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		return NULL;
 	}
 	xfree(name_char);
+
+	query = xstrdup_printf(
+		"update %s as t2 set deleted=1, mod_time=%d where %s",
+		acct_coord_table, now, assoc_char);
 	xfree(assoc_char);
 
+	rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
+	xfree(query);
+	if(rc != SLURM_SUCCESS) {
+		error("Couldn't remove user coordinators");
+		list_destroy(ret_list);
+		return NULL;
+	}		
+
 	return ret_list;
 
 #else
@@ -3104,10 +3275,176 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 }
 
 extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid, 
-					char *acct, acct_user_cond_t *user_q)
+					List acct_list,
+					acct_user_cond_t *user_q)
 {
 #ifdef HAVE_MYSQL
-	return NULL;
+	char *query = NULL, *object = NULL, *extra = NULL, *last_user = NULL;
+	char *user_name = NULL;
+	struct passwd *pw = NULL;
+	time_t now = time(NULL);
+	int set = 0, is_admin=0;
+	ListIterator itr = NULL;
+	acct_user_rec_t *user_rec = NULL;
+	List ret_list = NULL;
+	List user_list = NULL;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	acct_user_rec_t user;
+
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
+		return NULL;
+
+	memset(&user, 0, sizeof(acct_user_rec_t));
+	user.uid = uid;
+
+	/* This only works when running through the slurmdbd.
+	 * THERE IS NO AUTHENTICATION WHEN RUNNING OUTSIDE OF THE
+	 * SLURMDBD!
+	 */
+	if(slurmdbd_conf) {
+		/* we have to check the authentication here in the
+		 * plugin since we don't know what accounts are being
+		 * referenced until after the query.  Here we will
+		 * set if they are an operator or greater and then
+		 * check it below after the query.
+		 */
+		if(uid == slurmdbd_conf->slurm_user_id
+		   || assoc_mgr_get_admin_level(mysql_conn, uid) 
+		   >= ACCT_ADMIN_OPERATOR) 
+			is_admin = 1;	
+		else {
+			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1)
+			   != SLURM_SUCCESS) {
+				error("couldn't get information for this user");
+				errno = SLURM_ERROR;
+				return NULL;
+			}
+			if(!user.coord_accts || !list_count(user.coord_accts)) {
+				error("This user doesn't have any "
+				      "coordinator abilities");
+				errno = ESLURM_ACCESS_DENIED;
+				return NULL;
+			}
+		}
+	} else {
+		/* Setting this here just makes it easier down below
+		 * since user will not be filled in.
+		 */
+		is_admin = 1;
+	}
+
+	if((pw=getpwuid(uid))) {
+		user_name = pw->pw_name;
+	}
+
+	if(user_q->user_list && list_count(user_q->user_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " (");
+			
+		itr = list_iterator_create(user_q->user_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "user='%s'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
+	if(acct_list && list_count(acct_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " (");
+
+		itr = list_iterator_create(acct_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "acct='%s'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+	query = xstrdup_printf(
+		"select user, acct from %s where deleted=0 && %s order by user",
+		acct_coord_table, extra);
+
+	debug3("%d query\n%s", mysql_conn->conn, query);
+	if(!(result =
+	     mysql_db_query_ret(mysql_conn->acct_mysql_db, query, 0))) {
+		xfree(query);
+		xfree(extra);
+		return NULL;
+	}
+	xfree(query);
+	ret_list = list_create(slurm_destroy_char);
+	user_list = list_create(slurm_destroy_char);
+	while((row = mysql_fetch_row(result))) {
+		if(!is_admin) {
+			acct_coord_rec_t *coord = NULL;
+			if(!user.coord_accts) {
+				/* this should never happen */
+				error("We are here with no coord accts");
+				errno = ESLURM_ACCESS_DENIED;
+				list_destroy(ret_list);
+				list_destroy(user_list);
+				xfree(extra);
+				mysql_free_result(result);
+				return NULL;
+			}
+			itr = list_iterator_create(user.coord_accts);
+			while((coord = list_next(itr))) {
+				if(!strcasecmp(coord->acct_name, row[1]))
+					break;
+			}
+			list_iterator_destroy(itr);
+
+			if(!coord) {
+				error("User %s(%d) does not have the "
+				      "ability to change this account (%s)",
+				      user.name, user.uid, row[1]);
+				continue;
+			}
+		}
+		if(!last_user || strcasecmp(last_user, row[0])) {
+			list_append(user_list, xstrdup(row[0]));
+			last_user = row[0];
+		}
+		list_append(ret_list, xstrdup_printf("U = %-9s A = %-10s", 
+						     row[0], row[1]));
+	}
+	mysql_free_result(result);
+	
+	if(_remove_common(mysql_conn, DBD_REMOVE_ACCOUNT_COORDS, now,
+			  user_name, acct_coord_table, extra, NULL)
+	   == SLURM_ERROR) {
+		list_destroy(ret_list);
+		list_destroy(user_list);
+		xfree(extra);
+		return NULL;
+	}
+	xfree(extra);
+	/* get the update list set */
+	itr = list_iterator_create(user_list);
+	while((last_user = list_next(itr))) {
+		user_rec = xmalloc(sizeof(acct_user_rec_t));
+		user_rec->name = xstrdup(last_user);
+		_get_user_coords(mysql_conn, user_rec);
+		_addto_update_list(mysql_conn->update_list, 
+				   ACCT_REMOVE_COORD, user_rec);
+	}
+	list_iterator_destroy(itr);
+	list_destroy(user_list);
+
+	return ret_list;
 #else
 	return NULL;
 #endif
@@ -3139,18 +3476,8 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		user_name = pw->pw_name;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
 	xstrcat(extra, "where deleted=0");
 	if(acct_q->acct_list && list_count(acct_q->acct_list)) {
@@ -3211,7 +3538,6 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		xfree(query);
 		return NULL;
 	}
-	xfree(query);
 
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
@@ -3230,9 +3556,12 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	mysql_free_result(result);
 
 	if(!list_count(ret_list)) {
-		debug3("didn't effect anything");
+		errno = SLURM_NO_CHANGE_IN_DATA;
+		debug3("didn't affect anything\n%s", query);
+		xfree(query);
 		return ret_list;
 	}
+	xfree(query);
 
 	if(_remove_common(mysql_conn, DBD_REMOVE_ACCOUNTS, now,
 			  user_name, acct_table, name_char, assoc_char)
@@ -3275,18 +3604,8 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 		return NULL;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
 	if((pw=getpwuid(uid))) {
 		user_name = pw->pw_name;
@@ -3337,6 +3656,7 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 	mysql_free_result(result);
 
 	if(!list_count(ret_list)) {
+		errno = SLURM_NO_CHANGE_IN_DATA;
 		debug3("didn't effect anything\n%s", query);
 		xfree(query);
 		return ret_list;
@@ -3406,9 +3726,10 @@ extern List acct_storage_p_remove_associations(mysql_conn_t *mysql_conn,
 	time_t now = time(NULL);
 	struct passwd *pw = NULL;
 	char *user_name = NULL;
-	int set = 0, i = 0;
+	int set = 0, i = 0, is_admin=0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
+	acct_user_rec_t user;
 
 	/* if this changes you will need to edit the corresponding 
 	 * enum below also t1 is step_table */
@@ -3436,17 +3757,46 @@ extern List acct_storage_p_remove_associations(mysql_conn_t *mysql_conn,
 		return NULL;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
+
+	memset(&user, 0, sizeof(acct_user_rec_t));
+	user.uid = uid;
+
+	/* This only works when running through the slurmdbd.
+	 * THERE IS NO AUTHENTICATION WHEN RUNNING OUTSIDE OF THE
+	 * SLURMDBD!
+	 */
+	if(slurmdbd_conf) {
+		/* we have to check the authentication here in the
+		 * plugin since we don't know what accounts are being
+		 * referenced until after the query.  Here we will
+		 * set if they are an operator or greater and then
+		 * check it below after the query.
+		 */
+		if(uid == slurmdbd_conf->slurm_user_id
+		   || assoc_mgr_get_admin_level(mysql_conn, uid) 
+		   >= ACCT_ADMIN_OPERATOR) 
+			is_admin = 1;	
+		else {
+			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1)
 			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
+				error("couldn't get information for this user");
+				errno = SLURM_ERROR;
+				return NULL;
+			}
+			if(!user.coord_accts || !list_count(user.coord_accts)) {
+				error("This user doesn't have any "
+				      "coordinator abilities");
+				errno = ESLURM_ACCESS_DENIED;
+				return NULL;
+			}
 		}
+	} else {
+		/* Setting this here just makes it easier down below
+		 * since user will not be filled in.
+		 */
+		is_admin = 1;
 	}
 
 	xstrcat(extra, "where id>0 && deleted=0");
@@ -3531,7 +3881,6 @@ extern List acct_storage_p_remove_associations(mysql_conn_t *mysql_conn,
 		xfree(query);
 		return NULL;
 	}
-	xfree(query);
 		
 	rc = 0;
 	while((row = mysql_fetch_row(result))) {
@@ -3547,6 +3896,7 @@ extern List acct_storage_p_remove_associations(mysql_conn_t *mysql_conn,
 	mysql_free_result(result);
 
 	if(!name_char) {
+		errno = SLURM_NO_CHANGE_IN_DATA;
 		debug3("didn't effect anything\n%s", query);
 		xfree(query);
 		return ret_list;
@@ -3571,7 +3921,29 @@ extern List acct_storage_p_remove_associations(mysql_conn_t *mysql_conn,
 	ret_list = list_create(slurm_destroy_char);
 	while((row = mysql_fetch_row(result))) {
 		acct_association_rec_t *rem_assoc = NULL;
+		if(!is_admin) {
+			acct_coord_rec_t *coord = NULL;
+			if(!user.coord_accts) {
+				/* this should never happen */
+				error("We are here with no coord accts");
+				errno = ESLURM_ACCESS_DENIED;
+				goto end_it;
+			}
+			itr = list_iterator_create(user.coord_accts);
+			while((coord = list_next(itr))) {
+				if(!strcasecmp(coord->acct_name,
+					       row[RASSOC_ACCT]))
+					break;
+			}
+			list_iterator_destroy(itr);
 
+			if(!coord) {
+				error("User %s(%d) does not have the "
+				      "ability to change this account (%s)",
+				      user.name, user.uid, row[RASSOC_ACCT]);
+				continue;
+			}
+		}
 		if(row[RASSOC_PART][0]) { 
 			// see if there is a partition name
 			object = xstrdup_printf(
@@ -3625,6 +3997,14 @@ extern List acct_storage_p_remove_associations(mysql_conn_t *mysql_conn,
 	xfree(assoc_char);
 
 	return ret_list;
+end_it:
+	if(ret_list) {
+		list_destroy(ret_list);
+		ret_list = NULL;
+	}
+	mysql_free_result(result);
+
+	return NULL;
 #else
 	return NULL;
 #endif
@@ -3642,8 +4022,8 @@ extern List acct_storage_p_get_users(mysql_conn_t *mysql_conn,
 	char *object = NULL;
 	int set = 0;
 	int i=0;
-	MYSQL_RES *result = NULL, *coord_result = NULL;
-	MYSQL_ROW row, coord_row;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
 
 	/* if this changes you will need to edit the corresponding enum */
 	char *user_req_inx[] = {
@@ -3660,23 +4040,21 @@ extern List acct_storage_p_get_users(mysql_conn_t *mysql_conn,
 		USER_REQ_COUNT
 	};
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
-	xstrcat(extra, "where deleted=0");
 
-	if(!user_q) 
+	
+	if(!user_q) {
+		xstrcat(extra, "where deleted=0");
 		goto empty;
+	} 
+	
+	if(user_q->with_deleted) 
+		xstrcat(extra, "where (deleted=0 || deleted=1)");
+	else
+		xstrcat(extra, "where deleted=0");
+		
 
 	if(user_q->user_list && list_count(user_q->user_list)) {
 		set = 0;
@@ -3747,7 +4125,7 @@ empty:
 
 	while((row = mysql_fetch_row(result))) {
 		acct_user_rec_t *user = xmalloc(sizeof(acct_user_rec_t));
-		struct passwd *passwd_ptr = NULL;
+/* 		struct passwd *passwd_ptr = NULL; */
 		list_append(user_list, user);
 
 		user->name =  xstrdup(row[USER_REQ_NAME]);
@@ -3755,33 +4133,19 @@ empty:
 		user->admin_level = atoi(row[USER_REQ_AL]);
 		user->qos = atoi(row[USER_REQ_EX]);
 
-		passwd_ptr = getpwnam(user->name);
-		if(passwd_ptr) 
-			user->uid = passwd_ptr->pw_uid;
-		
-		user->coord_accts = list_create(destroy_acct_coord_rec);
-		query = xstrdup_printf("select acct from %s where user='%s' "
-				       "&& deleted=0",
-				       acct_coord_table, user->name);
-
-		if(!(coord_result =
-		     mysql_db_query_ret(mysql_conn->acct_mysql_db, query, 0))) {
-			xfree(query);
-			continue;
-		}
-		xfree(query);
-		
-		while((coord_row = mysql_fetch_row(coord_result))) {
-			acct_coord_rec_t *coord =
-				xmalloc(sizeof(acct_coord_rec_t));
-			list_append(user->coord_accts, coord);
-			coord->acct_name = xstrdup(coord_row[0]);
-			coord->sub_acct = 0;
+		/* user id will be set on the client since this could be on a
+		 * different machine where this user may not exist or
+		 * may have a different uid
+		 */
+/* 		passwd_ptr = getpwnam(user->name); */
+/* 		if(passwd_ptr)  */
+/* 			user->uid = passwd_ptr->pw_uid; */
+/* 		else */
+/* 			user->uid = (uint32_t)NO_VAL; */
+		if(user_q && user_q->with_coords) {
+			_get_user_coords(mysql_conn, user);
 		}
-		mysql_free_result(coord_result);
-		/* FIX ME: ADD SUB projects here from assoc list lft
-		 * rgt */
-		
+
 		if(user_q && user_q->with_assocs) {
 			acct_association_cond_t *assoc_q = NULL;
 			if(!user_q->assoc_cond) {
@@ -3838,22 +4202,19 @@ extern List acct_storage_p_get_accts(mysql_conn_t *mysql_conn,
 		ACCT_REQ_COUNT
 	};
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
-	xstrcat(extra, "where deleted=0");
-	if(!acct_q) 
+	
+	if(!acct_q) {
+		xstrcat(extra, "where deleted=0");
 		goto empty;
+	} 
+
+	if(acct_q->with_deleted) 
+		xstrcat(extra, "where (deleted=0 || deleted=1)");
+	else
+		xstrcat(extra, "where deleted=0");
 
 	if(acct_q->acct_list && list_count(acct_q->acct_list)) {
 		set = 0;
@@ -4024,23 +4385,19 @@ extern List acct_storage_p_get_clusters(mysql_conn_t *mysql_conn,
 		ASSOC_REQ_COUNT
 	};
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
-	xstrcat(extra, "where deleted=0");
 		
-	if(!cluster_q) 
+	if(!cluster_q) {
+		xstrcat(extra, "where deleted=0");
 		goto empty;
+	}
+
+	if(cluster_q->with_deleted) 
+		xstrcat(extra, "where (deleted=0 || deleted=1)");
+	else
+		xstrcat(extra, "where deleted=0");
 
 	if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) {
 		set = 0;
@@ -4094,6 +4451,14 @@ empty:
 		list_append(cluster_list, cluster);
 
 		cluster->name =  xstrdup(row[CLUSTER_REQ_NAME]);
+
+		/* get the usage if requested */
+		if(cluster_q->with_usage) {
+			clusteracct_storage_p_get_usage(mysql_conn, cluster,
+							cluster_q->usage_start,
+							cluster_q->usage_end);
+		}
+
 		cluster->control_host = xstrdup(row[CLUSTER_REQ_CH]);
 		cluster->control_port = atoi(row[CLUSTER_REQ_CP]);
 		query = xstrdup_printf("select %s from %s where cluster='%s' "
@@ -4172,6 +4537,8 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 	/* if this changes you will need to edit the corresponding enum */
 	char *assoc_req_inx[] = {
 		"id",
+		"lft",
+		"rgt",
 		"user",
 		"acct",
 		"cluster",
@@ -4185,6 +4552,8 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 	};
 	enum {
 		ASSOC_REQ_ID,
+		ASSOC_REQ_LFT,
+		ASSOC_REQ_RGT,
 		ASSOC_REQ_USER,
 		ASSOC_REQ_ACCT,
 		ASSOC_REQ_CLUSTER,
@@ -4205,22 +4574,19 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 		ASSOC2_REQ_MCPJ
 	};
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
-	}
 
-	xstrcat(extra, "where deleted=0");
-	if(!assoc_q) 
+
+	if(!assoc_q) {
+		xstrcat(extra, "where deleted=0");
 		goto empty;
+	}
+
+	if(assoc_q->with_deleted) 
+		xstrcat(extra, "where (deleted=0 || deleted=1)");
+	else
+		xstrcat(extra, "where deleted=0");
 
 	if(assoc_q->acct_list && list_count(assoc_q->acct_list)) {
 		set = 0;
@@ -4269,6 +4635,16 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(assoc_q->id_list);
 		while((object = list_next(itr))) {
+			char *ptr = NULL;
+			long num = strtol(object, &ptr, 10);
+			if ((num == 0) && ptr && ptr[0]) {
+				error("Invalid value for assoc id (%s)",
+				      object);
+				xfree(extra);
+				list_iterator_destroy(itr);
+				return NULL;
+			}
+
 			if(set) 
 				xstrcat(extra, " || ");
 			xstrfmtcat(extra, "id=%s", object);
@@ -4310,8 +4686,17 @@ empty:
 
 		list_append(assoc_list, assoc);
 		
-		assoc->id =  atoi(row[ASSOC_REQ_ID]);
-		
+		assoc->id = atoi(row[ASSOC_REQ_ID]);
+		assoc->lft = atoi(row[ASSOC_REQ_LFT]);
+		assoc->rgt = atoi(row[ASSOC_REQ_RGT]);
+	
+		/* get the usage if requested */
+		if(assoc_q->with_usage) {
+			acct_storage_p_get_usage(mysql_conn, assoc,
+						 assoc_q->usage_start,
+						 assoc_q->usage_end);
+		}
+
 		if(row[ASSOC_REQ_USER][0])
 			assoc->user = xstrdup(row[ASSOC_REQ_USER]);
 		assoc->acct = xstrdup(row[ASSOC_REQ_ACCT]);
@@ -4435,6 +4820,131 @@ extern int acct_storage_p_get_usage(mysql_conn_t *mysql_conn,
 {
 #ifdef HAVE_MYSQL
 	int rc = SLURM_SUCCESS;
+	int i=0;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	char *tmp = NULL;
+	char *my_usage_table = assoc_day_table;
+	time_t my_time = time(NULL);
+	struct tm start_tm;
+	struct tm end_tm;
+	char *query = NULL;
+
+	char *assoc_req_inx[] = {
+		"t1.id",
+		"t1.period_start",
+		"t1.alloc_cpu_secs"
+	};
+	
+	enum {
+		ASSOC_ID,
+		ASSOC_START,
+		ASSOC_ACPU,
+		ASSOC_COUNT
+	};
+
+	if(!acct_assoc->id) {
+		error("We need a assoc id to set data for");
+		return SLURM_ERROR;
+	}
+
+	/* Default is going to be the last day */
+	if(!end) {
+		if(!localtime_r(&my_time, &end_tm)) {
+			error("Couldn't get localtime from end %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+		end_tm.tm_hour = 0;
+		end = mktime(&end_tm);		
+	} else {
+		if(!localtime_r(&end, &end_tm)) {
+			error("Couldn't get localtime from user end %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+	}
+	end_tm.tm_sec = 0;
+	end_tm.tm_min = 0;
+	end_tm.tm_isdst = -1;
+	end = mktime(&end_tm);		
+
+	if(!start) {
+		if(!localtime_r(&my_time, &start_tm)) {
+			error("Couldn't get localtime from start %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+		start_tm.tm_hour = 0;
+		start_tm.tm_mday--;
+		start = mktime(&start_tm);		
+	} else {
+		if(!localtime_r(&start, &start_tm)) {
+			error("Couldn't get localtime from user start %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+	}
+	start_tm.tm_sec = 0;
+	start_tm.tm_min = 0;
+	start_tm.tm_isdst = -1;
+	start = mktime(&start_tm);		
+
+	if(end-start < 3600) {
+		end = start + 3600;
+		if(!localtime_r(&end, &end_tm)) {
+			error("2 Couldn't get localtime from user end %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+	}
+	/* check to see if we are off day boundaries or on month
+	 * boundaries, otherwise use the day table.
+	 */
+	if(start_tm.tm_hour || end_tm.tm_hour || (end-start < 86400)) 
+		my_usage_table = assoc_hour_table;
+	else if(start_tm.tm_mday == 1 && end_tm.tm_mday == 1 
+		&& (end-start > 86400))
+		my_usage_table = assoc_month_table;
+		
+	xfree(tmp);
+	i=0;
+	xstrfmtcat(tmp, "%s", assoc_req_inx[i]);
+	for(i=1; i<ASSOC_COUNT; i++) {
+		xstrfmtcat(tmp, ", %s", assoc_req_inx[i]);
+	}
+
+	query = xstrdup_printf(
+		"select %s from %s as t1, %s as t2, %s as t3 "
+		"where (t1.period_start < %d && t1.period_start >= %d) "
+		"&& t1.id=t2.id && t3.id=%u && "
+		"t2.lft between t3.lft and t3.rgt "
+		"order by t1.id, period_start;",
+		tmp, my_usage_table, assoc_table, assoc_table, end, start,
+		acct_assoc->id);
+	xfree(tmp);
+	debug3("%d query\n%s", mysql_conn->conn, query);
+	if(!(result = mysql_db_query_ret(
+		     mysql_conn->acct_mysql_db, query, 0))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+
+	if(!acct_assoc->accounting_list)
+		acct_assoc->accounting_list =
+			list_create(destroy_acct_accounting_rec);
+
+	while((row = mysql_fetch_row(result))) {
+		acct_accounting_rec_t *accounting_rec =
+			xmalloc(sizeof(acct_accounting_rec_t));
+		accounting_rec->assoc_id = atoi(row[ASSOC_ID]);
+		accounting_rec->period_start = atoi(row[ASSOC_START]);
+		accounting_rec->alloc_secs = atoll(row[ASSOC_ACPU]);
+		list_append(acct_assoc->accounting_list, accounting_rec);
+	}
+	mysql_free_result(result);
+	
 	return rc;
 #else
 	return SLURM_ERROR;
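
Both acct_storage_p_get_usage() above and the rollup code below snap start/end times to hour or day boundaries and set tm_isdst = -1 so that mktime() resolves daylight-saving time itself. A small, self-contained illustration of that snapping (not SLURM code):

#include <stdio.h>
#include <time.h>

/* Snap a time_t to the previous midnight, letting mktime() resolve DST. */
static time_t snap_to_midnight(time_t when)
{
	struct tm tm_buf;

	if (!localtime_r(&when, &tm_buf))
		return (time_t) -1;
	tm_buf.tm_sec = 0;
	tm_buf.tm_min = 0;
	tm_buf.tm_hour = 0;
	tm_buf.tm_isdst = -1;	/* -1: mktime() works out DST itself */
	return mktime(&tm_buf);
}

int main(void)
{
	time_t now = time(NULL);
	time_t day_start = snap_to_midnight(now);
	char buf[64];

	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S",
		 localtime(&day_start));
	printf("day starts at %s (%ld)\n", buf, (long) day_start);
	return 0;
}

Leaving tm_isdst at its previous value instead of -1 is what used to push the day and month rollup boundaries off by an hour across a DST change, which is the problem the hunks below address.
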
@@ -4474,18 +4984,8 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 		UPDATE_COUNT
 	};
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	if(!sent_start) {
 		i=0;
@@ -4537,6 +5037,13 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 			mysql_free_result(result);
 		}
 	}
+	
+	/* test month gap */
+/* 	last_hour = 1212299999; */
+/* 	last_day = 1212217200; */
+/* 	last_month = 1212217200; */
+/* 	my_time = 1212307200; */
+
 /* 	last_hour = 1211475599; */
 /* 	last_day = 1211475599; */
 /* 	last_month = 1211475599; */
@@ -4560,19 +5067,22 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 	/* below and anywhere in a rollup plugin when dealing with
 	 * epoch times we need to set the tm_isdst = -1 so we don't
 	 * have to worry about the time changes.  Not setting it to -1
-	 * will cause problems in the month with the date change.
+	 * will cause problems in the day and month with the date change.
 	 */
 
 	start_tm.tm_sec = 0;
 	start_tm.tm_min = 0;
-	start_tm.tm_hour++;
 	start_tm.tm_isdst = -1;
 	start_time = mktime(&start_tm);
-	end_tm.tm_sec = 59;
-	end_tm.tm_min = 59;
-	end_tm.tm_hour--;
+	end_tm.tm_sec = 0;
+	end_tm.tm_min = 0;
 	end_tm.tm_isdst = -1;
 	end_time = mktime(&end_tm);
+
+/* 	info("hour start %s", ctime(&start_time)); */
+/* 	info("hour end %s", ctime(&end_time)); */
+/* 	info("diff is %d", end_time-start_time); */
+
 	if(end_time-start_time > 0) {
 		START_TIMER;
 		if((rc = mysql_hourly_rollup(mysql_conn, start_time, end_time)) 
@@ -4582,7 +5092,7 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 		query = xstrdup_printf("update %s set hourly_rollup=%d",
 				       last_ran_table, end_time);
 	} else {
-		debug2("no need to run this hour %d < %d", 
+		debug2("no need to run this hour %d <= %d", 
 		       end_time, start_time);
 	}
 
@@ -4593,13 +5103,16 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 	start_tm.tm_sec = 0;
 	start_tm.tm_min = 0;
 	start_tm.tm_hour = 0;
-	start_tm.tm_mday++;
 	start_tm.tm_isdst = -1;
 	start_time = mktime(&start_tm);
-	end_tm.tm_hour = 23;
-	end_tm.tm_mday--;
+	end_tm.tm_hour = 0;
 	end_tm.tm_isdst = -1;
 	end_time = mktime(&end_tm);
+
+/* 	info("day start %s", ctime(&start_time)); */
+/* 	info("day end %s", ctime(&end_time)); */
+/* 	info("diff is %d", end_time-start_time); */
+
 	if(end_time-start_time > 0) {
 		START_TIMER;
 		if((rc = mysql_daily_rollup(mysql_conn, start_time, end_time)) 
@@ -4612,7 +5125,8 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 			query = xstrdup_printf("update %s set daily_rollup=%d",
 					       last_ran_table, end_time);
 	} else {
-		debug2("no need to run this day %d < %d", end_time, start_time);
+		debug2("no need to run this day %d <= %d",
+		       end_time, start_time);
 	}
 
 	if(!localtime_r(&last_month, &start_tm)) {
@@ -4624,15 +5138,21 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 	start_tm.tm_min = 0;
 	start_tm.tm_hour = 0;
 	start_tm.tm_mday = 1;
-	start_tm.tm_mon++;
 	start_tm.tm_isdst = -1;
 	start_time = mktime(&start_tm);
-	end_tm.tm_sec = -1;
+	end_time = mktime(&end_tm);
+
+	end_tm.tm_sec = 0;
 	end_tm.tm_min = 0;
 	end_tm.tm_hour = 0;
 	end_tm.tm_mday = 1;
 	end_tm.tm_isdst = -1;
 	end_time = mktime(&end_tm);
+
+/* 	info("month start %s", ctime(&start_time)); */
+/* 	info("month end %s", ctime(&end_time)); */
+/* 	info("diff is %d", end_time-start_time); */
+
 	if(end_time-start_time > 0) {
 		START_TIMER;
 		if((rc = mysql_monthly_rollup(
@@ -4641,13 +5161,13 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 		END_TIMER2("monthly_rollup");
 
 		if(query) 
-			xstrfmtcat(query, ", montly_rollup=%d", end_time);
+			xstrfmtcat(query, ", monthly_rollup=%d", end_time);
 		else 
 			query = xstrdup_printf(
 				"update %s set monthly_rollup=%d",
 				last_ran_table, end_time);
 	} else {
-		debug2("no need to run this month %d < %d",
+		debug2("no need to run this month %d <= %d",
 		       end_time, start_time);
 	}
 	
@@ -4673,18 +5193,8 @@ extern int clusteracct_storage_p_node_down(mysql_conn_t *mysql_conn,
 	char *query = NULL;
 	char *my_reason;
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	if (slurmctld_conf.fast_schedule && !slurmdbd_conf)
 		cpus = node_ptr->config_ptr->cpus;
@@ -4701,7 +5211,7 @@ extern int clusteracct_storage_p_node_down(mysql_conn_t *mysql_conn,
 	query = xstrdup_printf(
 		"update %s set period_end=%d where cluster='%s' "
 		"and period_end=0 and node_name='%s';",
-		event_table, (event_time-1), cluster, node_ptr->name);
+		event_table, event_time, cluster, node_ptr->name);
 	xstrfmtcat(query,
 		   "insert into %s "
 		   "(node_name, cluster, cpu_count, period_start, reason) "
@@ -4725,23 +5235,13 @@ extern int clusteracct_storage_p_node_up(mysql_conn_t *mysql_conn,
 	char* query;
 	int rc = SLURM_SUCCESS;
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	query = xstrdup_printf(
 		"update %s set period_end=%d where cluster='%s' "
 		"and period_end=0 and node_name='%s';",
-		event_table, (event_time-1), cluster, node_ptr->name);
+		event_table, event_time, cluster, node_ptr->name);
 	rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
 	xfree(query);
 	return rc;
@@ -4762,31 +5262,13 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 					       time_t event_time)
 {
 #ifdef HAVE_MYSQL
-	static uint32_t last_procs = -1;
 	char* query;
 	int rc = SLURM_SUCCESS;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 
-	if (procs == last_procs) {
-		debug3("we have the same procs as before no need to "
-		       "update the database.");
-		return SLURM_SUCCESS;
-	}
-	last_procs = procs;
-
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	/* Record the processor count */
 	query = xstrdup_printf(
@@ -4808,7 +5290,8 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 	}
 
 	if(atoi(row[0]) == procs) {
-		debug("%s hasn't changed since last entry", cluster);
+		debug3("we have the same procs as before no need to "
+		       "update the database.");
 		goto end_it;
 	}
 	debug("%s has changed from %s cpus to %u", cluster, row[0], procs);   
@@ -4816,7 +5299,7 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 	query = xstrdup_printf(
 		"update %s set period_end=%d where cluster='%s' "
 		"and period_end=0 and node_name=''",
-		event_table, (event_time-1), cluster);
+		event_table, event_time, cluster);
 	rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
 	xfree(query);
 	if(rc != SLURM_SUCCESS)
@@ -4842,8 +5325,141 @@ extern int clusteracct_storage_p_get_usage(
 	acct_cluster_rec_t *cluster_rec, time_t start, time_t end)
 {
 #ifdef HAVE_MYSQL
+	int rc = SLURM_SUCCESS;
+	int i=0;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	char *tmp = NULL;
+	char *my_usage_table = cluster_day_table;
+	time_t my_time = time(NULL);
+	struct tm start_tm;
+	struct tm end_tm;
+	char *query = NULL;
+	char *cluster_req_inx[] = {
+		"alloc_cpu_secs",
+		"down_cpu_secs",
+		"idle_cpu_secs",
+		"resv_cpu_secs",
+		"over_cpu_secs",
+		"cpu_count",
+		"period_start"
+	};
+	
+	enum {
+		CLUSTER_ACPU,
+		CLUSTER_DCPU,
+		CLUSTER_ICPU,
+		CLUSTER_RCPU,
+		CLUSTER_OCPU,
+		CLUSTER_CPU_COUNT,
+		CLUSTER_START,
+		CLUSTER_COUNT
+	};
 
-	return SLURM_SUCCESS;
+	if(!cluster_rec->name) {
+		error("We need a cluster name to set data for");
+		return SLURM_ERROR;
+	}
+
+	/* Default is going to be the last day */
+	if(!end) {
+		if(!localtime_r(&my_time, &end_tm)) {
+			error("Couldn't get localtime from end %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+		end_tm.tm_hour = 0;
+		end = mktime(&end_tm);		
+	} else {
+		if(!localtime_r(&end, &end_tm)) {
+			error("Couldn't get localtime from user end %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+	}
+	end_tm.tm_sec = 0;
+	end_tm.tm_min = 0;
+	end_tm.tm_isdst = -1;
+	end = mktime(&end_tm);		
+
+	if(!start) {
+		if(!localtime_r(&my_time, &start_tm)) {
+			error("Couldn't get localtime from start %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+		start_tm.tm_hour = 0;
+		start_tm.tm_mday--;
+		start = mktime(&start_tm);		
+	} else {
+		if(!localtime_r(&start, &start_tm)) {
+			error("Couldn't get localtime from user start %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+	}
+	start_tm.tm_sec = 0;
+	start_tm.tm_min = 0;
+	start_tm.tm_isdst = -1;
+	start = mktime(&start_tm);		
+
+	if(end-start < 3600) {
+		end = start + 3600;
+		if(!localtime_r(&end, &end_tm)) {
+			error("2 Couldn't get localtime from user end %d",
+			      my_time);
+			return SLURM_ERROR;
+		}
+	}
+	/* Check whether we are off day boundaries or on month
+	 * boundaries, otherwise use the day table.
+	 */
+	if(start_tm.tm_hour || end_tm.tm_hour || (end-start < 86400)) 
+		my_usage_table = cluster_hour_table;
+	else if(start_tm.tm_mday == 1 && end_tm.tm_mday == 1
+		&& (end-start > 86400))
+		my_usage_table = cluster_month_table;
+
+	xfree(tmp);
+	i=0;
+	xstrfmtcat(tmp, "%s", cluster_req_inx[i]);
+	for(i=1; i<CLUSTER_COUNT; i++) {
+		xstrfmtcat(tmp, ", %s", cluster_req_inx[i]);
+	}
+
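+	/* Pull the usage rows for this cluster over the requested window */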
+	query = xstrdup_printf(
+		"select %s from %s where (period_start < %d "
+		"&& period_start >= %d) and cluster='%s'",
+		tmp, my_usage_table, end, start, cluster_rec->name);
+
+	xfree(tmp);
+	debug3("%d query\n%s", mysql_conn->conn, query);
+	if(!(result = mysql_db_query_ret(
+		     mysql_conn->acct_mysql_db, query, 0))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+
+	if(!cluster_rec->accounting_list)
+		cluster_rec->accounting_list =
+			list_create(destroy_cluster_accounting_rec);
+	
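+	/* Build one cluster_accounting_rec_t per returned row */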
+	while((row = mysql_fetch_row(result))) {
+		cluster_accounting_rec_t *accounting_rec =
+			xmalloc(sizeof(cluster_accounting_rec_t));
+		accounting_rec->alloc_secs = atoll(row[CLUSTER_ACPU]);
+		accounting_rec->down_secs = atoll(row[CLUSTER_DCPU]);
+		accounting_rec->idle_secs = atoll(row[CLUSTER_ICPU]);
+		accounting_rec->over_secs = atoll(row[CLUSTER_OCPU]);
+		accounting_rec->resv_secs = atoll(row[CLUSTER_RCPU]);
+		accounting_rec->cpu_count = atoi(row[CLUSTER_CPU_COUNT]);
+		accounting_rec->period_start = atoi(row[CLUSTER_START]);
+		list_append(cluster_rec->accounting_list, accounting_rec);
+	}
+	mysql_free_result(result);
+
+	return rc;
 #else
 	return SLURM_ERROR;
 #endif
@@ -4870,18 +5486,8 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 	
 	debug2("mysql_jobacct_job_start() called");
 	priority = (job_ptr->priority == NO_VAL) ?
@@ -4919,6 +5525,13 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 
 	job_ptr->requid = -1; /* force to -1 for sacct to know this
 			       * hasn't been set yet */
+	
+	/* We need to put a 0 for 'end' in case of funky job state
+	 * files from a hot start of the controllers.  We may call
+	 * job_start on jobs we still know about after job_flush has
+	 * been called, so we need to restart them by zeroing out the
+	 * end time.
+	 */
 	if(!job_ptr->db_index) {
 		query = xstrdup_printf(
 			"insert into %s "
@@ -4928,7 +5541,8 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 			"values (%u, '%s', %u, %u, %u, '%s', '%s', "
 			"%d, %d, %d, '%s', %u, "
 			"%u, %u, %u, %u, '%s') "
-			"on duplicate key update id=LAST_INSERT_ID(id)",
+			"on duplicate key update id=LAST_INSERT_ID(id), "
+			"end=0, state=%u",
 			job_table, job_ptr->job_id, job_ptr->account, 
 			job_ptr->assoc_id,
 			job_ptr->user_id, job_ptr->group_id,
@@ -4939,7 +5553,8 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 			jname, track_steps,
 			job_ptr->job_state & (~JOB_COMPLETING),
 			priority, job_ptr->num_procs,
-			job_ptr->total_procs, nodes);
+			job_ptr->total_procs, nodes,
+			job_ptr->job_state & (~JOB_COMPLETING));
 
 	try_again:
 		if(!(job_ptr->db_index = mysql_insert_ret_id(
@@ -4961,7 +5576,7 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 		query = xstrdup_printf(
 			"update %s set partition='%s', blockid='%s', start=%d, "
 			"name='%s', state=%u, alloc_cpus=%u, nodelist='%s', "
-			"account='%s' where id=%d",
+			"account='%s', end=0 where id=%d",
 			job_table, job_ptr->partition, block_id,
 			(int)job_ptr->start_time,
 			jname, 
@@ -4999,18 +5614,8 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 	debug2("mysql_jobacct_job_complete() called");
 	if (job_ptr->end_time == 0) {
 		debug("mysql_jobacct: job %u never started", job_ptr->job_id);
@@ -5031,6 +5636,7 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 			
 		}
 	}
+
 	query = xstrdup_printf("update %s set start=%u, end=%u, state=%d, "
 			       "nodelist='%s', comp_code=%u, "
 			       "kill_requid=%u where id=%u",
@@ -5071,18 +5677,8 @@ extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 	if(slurmdbd_conf) {
 		cpus = step_ptr->job_ptr->total_procs;
 		snprintf(node_list, BUFFER_SIZE, "%s",
@@ -5132,11 +5728,11 @@ extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn,
 		"insert into %s (id, stepid, start, name, state, "
 		"cpus, nodelist) "
 		"values (%d, %u, %d, '%s', %d, %u, '%s') "
-		"on duplicate key update cpus=%u",
+		"on duplicate key update cpus=%u, end=0, state=%u",
 		step_table, step_ptr->job_ptr->db_index,
 		step_ptr->step_id, 
 		(int)step_ptr->start_time, step_ptr->name,
-		JOB_RUNNING, cpus, node_list, cpus);
+		JOB_RUNNING, cpus, node_list, cpus, JOB_RUNNING);
 	debug3("%d query\n%s", mysql_conn->conn, query);
 	rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
 	xfree(query);
@@ -5179,18 +5775,8 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 		jobacct = &dummy_jobacct;
 	}
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
 	if(slurmdbd_conf) {
 		now = step_ptr->job_ptr->end_time;
@@ -5215,7 +5801,7 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 		comp_status = JOB_FAILED;
 	else
 		comp_status = JOB_COMPLETE;
 	/* figure out the ave of the totals sent */
 	if(cpus > 0) {
 		ave_vsize = jobacct->tot_vsize;
@@ -5307,18 +5893,8 @@ extern int jobacct_storage_p_suspend(mysql_conn_t *mysql_conn,
 	int rc = SLURM_SUCCESS;
 	bool suspended = false;
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 	if(!job_ptr->db_index) {
 		job_ptr->db_index = _get_db_index(mysql_conn->acct_mysql_db,
 						  job_ptr->details->submit_time,
@@ -5377,26 +5953,53 @@ extern int jobacct_storage_p_suspend(mysql_conn_t *mysql_conn,
 extern List jobacct_storage_p_get_jobs(mysql_conn_t *mysql_conn, 
 				       List selected_steps,
 				       List selected_parts,
-				       void *params)
+				       sacct_parameters_t *params)
 {
 	List job_list = NULL;
 #ifdef HAVE_MYSQL
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	acct_job_cond_t job_cond;
+	struct passwd *pw = NULL;
+
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return NULL;
-		}
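+	/* Build an acct_job_cond_t from the sacct-style parameters */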
+	memset(&job_cond, 0, sizeof(acct_job_cond_t));
+
+	job_cond.step_list = selected_steps;
+	job_cond.partition_list = selected_parts;
+	if(params->opt_cluster) {
+		job_cond.cluster_list = list_create(NULL);
+		list_append(job_cond.cluster_list, params->opt_cluster);
 	}
-	job_list = mysql_jobacct_process_get_jobs(mysql_conn,
-						  selected_steps,
-						  selected_parts,
-						  params);	
+
+	if (params->opt_uid >=0 && (pw=getpwuid(params->opt_uid))) {
+		job_cond.user_list = list_create(NULL);
+		list_append(job_cond.user_list, pw->pw_name);
+	}	
+
+	job_list = mysql_jobacct_process_get_jobs(mysql_conn, &job_cond);
+
+	if(job_cond.user_list)
+		list_destroy(job_cond.user_list);
+	if(job_cond.cluster_list)
+		list_destroy(job_cond.cluster_list);
+		
+#endif
+	return job_list;
+}
+
+/*
+ * get info from the storage
+ * returns List of jobacct_job_rec_t *
+ * note: the returned List needs to be freed by the caller
+ */
+extern List jobacct_storage_p_get_jobs_cond(mysql_conn_t *mysql_conn, 
+					    acct_job_cond_t *job_cond)
+{
+	List job_list = NULL;
+#ifdef HAVE_MYSQL
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
+		return NULL;
+	job_list = mysql_jobacct_process_get_jobs(mysql_conn, job_cond);	
 #endif
 	return job_list;
 }
@@ -5409,15 +6012,8 @@ extern void jobacct_storage_p_archive(mysql_conn_t *mysql_conn,
 				      void *params)
 {
 #ifdef HAVE_MYSQL
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(!mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					    mysql_db_name, mysql_db_info))
-			return;
-	}
 	mysql_jobacct_process_archive(mysql_conn,
 				      selected_parts, params);
 #endif
@@ -5438,30 +6034,78 @@ extern int acct_storage_p_flush_jobs_on_cluster(
 	int rc = SLURM_SUCCESS;
 #ifdef HAVE_MYSQL
 	/* put end times for a clean start */
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
 	char *query = NULL;
+	char *id_char = NULL;
+	char *suspended_char = NULL;
 
-	if(!mysql_conn) {
-		error("We need a connection to run this");
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	} else if(!mysql_conn->acct_mysql_db
-		  || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) {
-		if(mysql_get_db_connection(&mysql_conn->acct_mysql_db,
-					   mysql_db_name, mysql_db_info)
-			   != SLURM_SUCCESS) {
-			error("unable to re-connect to mysql database");
-			return SLURM_ERROR;
-		}
-	}
 
-	query = xstrdup_printf("update %s as t1, %s as t2 set "
-			       "t1.state=%u, t1.end=%u where "
-			       "t2.id=t1.associd and t2.cluster='%s' "
+	/* First we need to get the ids and states so we can clean up
+	 * the suspend table and the step table
+	 */
+	query = xstrdup_printf("select t1.id, t1.state from %s as t1, %s as t2 "
+			       "where t2.id=t1.associd and t2.cluster='%s' "
 			       "&& t1.end=0;",
-			       job_table, assoc_table, JOB_CANCELLED, 
-			       event_time, cluster);
-
-	rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
+			       job_table, assoc_table, cluster);
+	if(!(result =
+	     mysql_db_query_ret(mysql_conn->acct_mysql_db, query, 0))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
 	xfree(query);
+
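+	/* Build OR'd id lists of every unfinished job on this cluster,
+	 * noting which of them are currently suspended */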
+	while((row = mysql_fetch_row(result))) {
+		int state = atoi(row[1]);
+		if(state == JOB_SUSPENDED) {
+			if(suspended_char) 
+				xstrfmtcat(suspended_char, " || id=%s", row[0]);
+			else
+				xstrfmtcat(suspended_char, "id=%s", row[0]);
+		}
+		
+		if(id_char) 
+			xstrfmtcat(id_char, " || id=%s", row[0]);
+		else
+			xstrfmtcat(id_char, "id=%s", row[0]);
+	}
+	mysql_free_result(result);
+	
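+	/* For suspended jobs, finish the suspend bookkeeping on the job,
+	 * step and suspend tables before they are marked cancelled below */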
+	if(suspended_char) {
+		xstrfmtcat(query,
+			   "update %s set suspended=%d-suspended where %s;",
+			   job_table, event_time, suspended_char);
+		xstrfmtcat(query,
+			   "update %s set suspended=%d-suspended where %s;",
+			   step_table, event_time, suspended_char);
+		xstrfmtcat(query,
+			   "update %s set end=%d where (%s) && end=0;",
+			   suspend_table, event_time, suspended_char);
+		xfree(suspended_char);
+	}
+	if(id_char) {
+		xstrfmtcat(query,
+			   "update %s set state=%d, end=%u where %s;",
+			   job_table, JOB_CANCELLED, event_time, id_char);
+		xstrfmtcat(query,
+			   "update %s set state=%d, end=%u where %s;",
+			   step_table, JOB_CANCELLED, event_time, id_char);
+		xfree(id_char);
+	}
+/* 	query = xstrdup_printf("update %s as t1, %s as t2 set " */
+/* 			       "t1.state=%u, t1.end=%u where " */
+/* 			       "t2.id=t1.associd and t2.cluster='%s' " */
+/* 			       "&& t1.end=0;", */
+/* 			       job_table, assoc_table, JOB_CANCELLED,  */
+/* 			       event_time, cluster); */
+	if(query) {
+		debug3("%d query\n%s", mysql_conn->conn, query);
+		
+		rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
+		xfree(query);
+	}
 #endif
 
 	return rc;
diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
index 359c1926ff760c4f64392121e59e65cb76d4d0a4..7d669562f8a8a64bb677f25aadda8fdfc3952d54 100644
--- a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
+++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
@@ -45,25 +45,19 @@
 #include "mysql_jobacct_process.h"
 
 #ifdef HAVE_MYSQL
-static void _do_fdump(List job_list)
-{
-	info("fdump option not applicable from mysql plugin");
-	return;
-}
 
 extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
-					   List selected_steps,
-					   List selected_parts,
-					   sacct_parameters_t *params)
+					   acct_job_cond_t *job_cond)
 {
 
 	char *query = NULL;	
 	char *extra = NULL;	
 	char *tmp = NULL;	
-	char *selected_part = NULL;
+	char *object = NULL;
 	jobacct_selected_step_t *selected_step = NULL;
 	ListIterator itr = NULL;
 	int set = 0;
+	char *table_level="t2";
 	MYSQL_RES *result = NULL, *step_result = NULL;
 	MYSQL_ROW row, step_row;
 	int i;
@@ -97,7 +91,10 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 		"t1.alloc_cpus",
 		"t1.nodelist",
 		"t1.kill_requid",
-		"t1.qos"
+		"t1.qos",
+		"t2.user",
+		"t2.cluster",
+		"t2.lft"
 	};
 
 	/* if this changes you will need to edit the corresponding 
@@ -159,6 +156,9 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 		JOB_REQ_NODELIST,
 		JOB_REQ_KILL_REQUID,
 		JOB_REQ_QOS,
+		JOB_REQ_USER_NAME,
+		JOB_REQ_CLUSTER,
+		JOB_REQ_LFT,
 		JOB_REQ_COUNT		
 	};
 	enum {
@@ -195,62 +195,175 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 		STEP_REQ_COUNT
 	};
 
-	if(selected_steps && list_count(selected_steps)) {
+	if(!job_cond)
+		goto no_cond;
+
+	/* THIS ASSOCID CHECK ALWAYS NEEDS TO BE FIRST!!!!!!! */
+	if(job_cond->associd_list && list_count(job_cond->associd_list)) {
+		set = 0;
+		xstrfmtcat(extra, ", %s as t3 where (");
+		itr = list_iterator_create(job_cond->associd_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "t3.id=%s", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+		table_level="t3";
+		/* just in case the association is gone */
+		if(set) 
+			xstrcat(extra, " || ");
+		xstrfmtcat(extra, "t3.id is null) && "
+			   "(t2.lft between t3.lft and t3.rgt "
+			   "|| t2.lft is null)");
+	}
+
+	if(job_cond->acct_list && list_count(job_cond->acct_list)) {
 		set = 0;
 		if(extra)
 			xstrcat(extra, " && (");
 		else
 			xstrcat(extra, " where (");
-		itr = list_iterator_create(selected_steps);
-		while((selected_step = list_next(itr))) {
+		itr = list_iterator_create(job_cond->acct_list);
+		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			tmp = xstrdup_printf("t1.jobid=%u",
-					      selected_step->jobid);
-			xstrcat(extra, tmp);
+			xstrfmtcat(extra, "t1.acct='%s'", object);
 			set = 1;
-			xfree(tmp);
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
 
-	if(selected_parts && list_count(selected_parts)) {
+	if(job_cond->groupid_list && list_count(job_cond->groupid_list)) {
 		set = 0;
 		if(extra)
 			xstrcat(extra, " && (");
 		else
 			xstrcat(extra, " where (");
-		itr = list_iterator_create(selected_parts);
-		while((selected_part = list_next(itr))) {
+		itr = list_iterator_create(job_cond->groupid_list);
+		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			tmp = xstrdup_printf("t1.partition='%s'",
-					      selected_part);
-			xstrcat(extra, tmp);
+			xstrfmtcat(extra, "t1.gid=", object);
 			set = 1;
-			xfree(tmp);
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
-	
-	for(i=0; i<JOB_REQ_COUNT; i++) {
-		if(i) 
-			xstrcat(tmp, ", ");
-		xstrcat(tmp, job_req_inx[i]);
+
+	if(job_cond->partition_list && list_count(job_cond->partition_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " where (");
+		itr = list_iterator_create(job_cond->partition_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "t1.partition='%s'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
+	if(job_cond->step_list && list_count(job_cond->step_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " where (");
+		itr = list_iterator_create(job_cond->step_list);
+		while((selected_step = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "t1.jobid=%u", selected_step->jobid);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
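+	/* Only return jobs that overlap the requested time window */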
+	if(job_cond->usage_start) {
+		if(!job_cond->usage_end)
+			job_cond->usage_end = now;
+
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " where (");
+		xstrfmtcat(extra, 
+			   "(t1.eligible < %d "
+			   "&& (t1.end >= %d || t1.end = 0)))",
+			   job_cond->usage_end, job_cond->usage_start);
+	}
+
+	/* all of the association (t2) conditions need to be grouped here */
+	if(job_cond->cluster_list && list_count(job_cond->cluster_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " where (");
+
+		itr = list_iterator_create(job_cond->cluster_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "%s.cluster='%s'", 
+				   table_level, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		/* just in case the association is gone */
+		if(set) 
+			xstrcat(extra, " || ");
+		xstrfmtcat(extra, "%s.cluster is null)", table_level);
+	}
+
+	if(job_cond->user_list && list_count(job_cond->user_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " where (");
+
+		itr = list_iterator_create(job_cond->user_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "%s.user='%s'", table_level, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		/* just in case the association is gone */
+		if(set) 
+			xstrcat(extra, " || ");
+		xstrfmtcat(extra, "%s.user is null)", table_level);
+	}
+
+no_cond:	
+
+	xfree(tmp);
+	xstrfmtcat(tmp, "%s", job_req_inx[0]);
+	for(i=1; i<JOB_REQ_COUNT; i++) {
+		xstrfmtcat(tmp, ", %s", job_req_inx[i]);
 	}
 	
-	query = xstrdup_printf("select %s from %s t1",
-			       tmp, job_table);
+	query = xstrdup_printf("select %s from %s as t1 left join %s as t2 "
+			       "on t1.associd=t2.id",
+			       tmp, job_table, assoc_table);
 	xfree(tmp);
-
 	if(extra) {
 		xstrcat(query, extra);
 		xfree(extra);
 	}
-
-	//info("query = %s", query);
+	
+	debug3("%d query\n%s", mysql_conn->conn, query);
 	if(!(result = mysql_db_query_ret(
 		     mysql_conn->acct_mysql_db, query, 0))) {
 		xfree(query);
@@ -261,45 +374,101 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 
 	while((row = mysql_fetch_row(result))) {
 		char *id = row[JOB_REQ_ID];
-		acct_association_rec_t account_rec;
-		memset(&account_rec, 0, sizeof(acct_association_rec_t));
+		
 		job = create_jobacct_job_rec();
 
 		job->alloc_cpus = atoi(row[JOB_REQ_ALLOC_CPUS]);
-		account_rec.id = job->associd = atoi(row[JOB_REQ_ASSOCID]);
-		assoc_mgr_fill_in_assoc(mysql_conn, &account_rec, 0, NULL);
-		if(account_rec.cluster) {
-			if(params->opt_cluster &&
-			   strcmp(params->opt_cluster, account_rec.cluster)) {
-				destroy_jobacct_job_rec(job);
-				job = NULL;
-				continue;
-			}
-			job->cluster = xstrdup(account_rec.cluster);
-		}
+		job->associd = atoi(row[JOB_REQ_ASSOCID]);
+
+		if(row[JOB_REQ_CLUSTER])
+			job->cluster = xstrdup(row[JOB_REQ_CLUSTER]);
 
-		if(account_rec.user) 
-			job->user = xstrdup(account_rec.user);
+		if(row[JOB_REQ_USER_NAME]) 
+			job->user = xstrdup(row[JOB_REQ_USER_NAME]);
 		else 
 			job->uid = atoi(row[JOB_REQ_UID]);
-		if(account_rec.acct) 
-			job->account = xstrdup(account_rec.acct);
-		else
+
+		if(row[JOB_REQ_LFT])
+			job->lft = atoi(row[JOB_REQ_LFT]);
+
+		if(row[JOB_REQ_ACCOUNT])
 			job->account = xstrdup(row[JOB_REQ_ACCOUNT]);
-	
-		job->blockid = xstrdup(row[JOB_REQ_BLOCKID]);
+		if(row[JOB_REQ_BLOCKID])
+			job->blockid = xstrdup(row[JOB_REQ_BLOCKID]);
 
 		job->eligible = atoi(row[JOB_REQ_ELIGIBLE]);
 		job->submit = atoi(row[JOB_REQ_SUBMIT]);
 		job->start = atoi(row[JOB_REQ_START]);
 		job->end = atoi(row[JOB_REQ_END]);
-		job->suspended = atoi(row[JOB_REQ_SUSPENDED]);
-		if(!job->end) {
-			job->elapsed = now - job->start;
-		} else {
+		if(job_cond && job_cond->usage_start) {
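+			/* Clamp the job's start/end to the requested window
+			 * before computing the elapsed time */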
+			if(job->start && (job->start < job_cond->usage_start))
+				job->start = job_cond->usage_start;
+
+			if(!job->start && job->end)
+				job->start = job->end;
+
+			if(!job->end || job->end > job_cond->usage_end) 
+				job->end = job_cond->usage_end;
+
 			job->elapsed = job->end - job->start;
+
+			if(row[JOB_REQ_SUSPENDED]) {
+				MYSQL_RES *result2 = NULL;
+				MYSQL_ROW row2;
+				/* get the suspended time for this job */
+				query = xstrdup_printf(
+					"select start, end from %s where "
+					"(start < %d && (end >= %d "
+					"|| end = 0)) && id=%s "
+					"order by start",
+					suspend_table,
+					job_cond->usage_end, 
+					job_cond->usage_start,
+					row[JOB_REQ_ID]);
+				
+				debug4("%d query\n%s", mysql_conn->conn, query);
+				if(!(result2 = mysql_db_query_ret(
+					     mysql_conn->acct_mysql_db,
+					     query, 0))) {
+					list_destroy(job_list);
+					job_list = NULL;
+					break;
+				}
+				xfree(query);
+				while((row2 = mysql_fetch_row(result2))) {
+					int local_start =
+						atoi(row2[0]);
+					int local_end = 
+						atoi(row2[1]);
+					
+					if(!local_start)
+						continue;
+					
+					if(job->start > local_start)
+						local_start = job->start;
+					if(job->end < local_end)
+						local_end = job->end;
+					
+					if((local_end - local_start) < 1)
+						continue;
+					
+					job->elapsed -= 
+						(local_end - local_start);
+					job->suspended += 
+						(local_end - local_start);
+				}
+				mysql_free_result(result2);			
+
+			}
+		} else {
+			job->suspended = atoi(row[JOB_REQ_SUSPENDED]);
+			if(!job->end) {
+				job->elapsed = now - job->start;
+			} else {
+				job->elapsed = job->end - job->start;
+			}
+			job->elapsed -= job->suspended;
 		}
-		job->elapsed -= job->suspended;
 
 		job->jobid = atoi(row[JOB_REQ_JOBID]);
 		job->jobname = xstrdup(row[JOB_REQ_NAME]);
@@ -322,9 +491,10 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 					
 		list_append(job_list, job);
 
-		if(selected_steps && list_count(selected_steps)) {
+		if(job_cond && job_cond->step_list
+		   && list_count(job_cond->step_list)) {
 			set = 0;
-			itr = list_iterator_create(selected_steps);
+			itr = list_iterator_create(job_cond->step_list);
 			while((selected_step = list_next(itr))) {
 				if(selected_step->jobid != job->jobid) {
 					continue;
@@ -339,11 +509,9 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 				else 
 					xstrcat(extra, " && (");
 			
-				tmp = xstrdup_printf("t1.stepid=%u",
-						     selected_step->stepid);
-				xstrcat(extra, tmp);
+				xstrfmtcat(extra, "t1.stepid=%u",
+					   selected_step->stepid);
 				set = 1;
-				xfree(tmp);
 				job->show_full = 0;
 			}
 			list_iterator_destroy(itr);
@@ -382,7 +550,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 			step->state = atoi(step_row[STEP_REQ_STATE]);
 			step->exitcode = atoi(step_row[STEP_REQ_COMP_CODE]);
 			step->ncpus = atoi(step_row[STEP_REQ_CPUS]);
-			step->start = atoi(step_row[JOB_REQ_START]);
+			step->start = atoi(step_row[STEP_REQ_START]);
 			
 			step->end = atoi(step_row[STEP_REQ_END]);
 			/* figure this out by start stop */
@@ -405,17 +573,17 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 				step->tot_cpu_usec += 
 				step->user_cpu_usec + step->sys_cpu_usec;
 			step->sacct.max_vsize =
-				atoi(step_row[STEP_REQ_MAX_VSIZE]) * 1024;
+				atoi(step_row[STEP_REQ_MAX_VSIZE]);
 			step->sacct.max_vsize_id.taskid = 
 				atoi(step_row[STEP_REQ_MAX_VSIZE_TASK]);
 			step->sacct.ave_vsize = 
-				atof(step_row[STEP_REQ_AVE_VSIZE]) * 1024;
+				atof(step_row[STEP_REQ_AVE_VSIZE]);
 			step->sacct.max_rss =
-				atoi(step_row[STEP_REQ_MAX_RSS]) * 1024;
+				atoi(step_row[STEP_REQ_MAX_RSS]);
 			step->sacct.max_rss_id.taskid = 
 				atoi(step_row[STEP_REQ_MAX_RSS_TASK]);
 			step->sacct.ave_rss = 
-				atof(step_row[STEP_REQ_AVE_RSS]) * 1024;
+				atof(step_row[STEP_REQ_AVE_RSS]);
 			step->sacct.max_pages =
 				atoi(step_row[STEP_REQ_MAX_PAGES]);
 			step->sacct.max_pages_id.taskid = 
@@ -437,7 +605,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 				atoi(step_row[STEP_REQ_MAX_PAGES_NODE]);
 			step->sacct.min_cpu_id.nodeid = 
 				atoi(step_row[STEP_REQ_MIN_CPU_NODE]);
-	
+
 			step->requid = atoi(step_row[STEP_REQ_KILL_REQUID]);
 		}
 		mysql_free_result(step_result);
@@ -447,9 +615,6 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 	}
 	mysql_free_result(result);
 
-	if (params && params->opt_fdump) 
-		_do_fdump(job_list);
-
 	return job_list;
 }
 
diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h
index e9def5417a691a45d6ddf564e04a12befb16e008..c04fee6c40c4efcde525895fc315669ae1bc3685 100644
--- a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h
+++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h
@@ -66,13 +66,13 @@ typedef struct {
 
 //extern int acct_db_init;
 
+extern char *assoc_table;
 extern char *job_table;
 extern char *step_table;
+extern char *suspend_table;
 
 extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
-					   List selected_steps,
-					   List selected_parts,
-					   sacct_parameters_t *params);
+					   acct_job_cond_t *job_cond);
 
 extern void mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 					  List selected_parts,
diff --git a/src/plugins/accounting_storage/mysql/mysql_rollup.c b/src/plugins/accounting_storage/mysql/mysql_rollup.c
index 4fe1d41c120c6a67709742dde47fc5e385d65144..6df63f9321cc850a1aacec24b5a5dc479ab81ecb 100644
--- a/src/plugins/accounting_storage/mysql/mysql_rollup.c
+++ b/src/plugins/accounting_storage/mysql/mysql_rollup.c
@@ -44,18 +44,18 @@
 
 typedef struct {
 	int assoc_id;
-	int a_cpu;
+	uint64_t a_cpu;
 } local_assoc_usage_t;
 
 typedef struct {
 	char *name;
-	int total_time;
-	int a_cpu;
+	uint64_t total_time;
+	uint64_t a_cpu;
 	int cpu_count;
-	int d_cpu;
-	int i_cpu;
-	int o_cpu;
-	int r_cpu;
+	uint64_t d_cpu;
+	uint64_t i_cpu;
+	uint64_t o_cpu;
+	uint64_t r_cpu;
 	time_t start;
 	time_t end;
 } local_cluster_usage_t;
@@ -314,7 +314,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 
 			if(!row_end || row_end > curr_end) 
 				row_end = curr_end;
-			
+
 			if(last_id != assoc_id) {
 				a_usage =
 					xmalloc(sizeof(local_cluster_usage_t));
@@ -452,9 +452,13 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			 */
 			
 			if(c_usage->i_cpu < 0) {
+/* 				info("got %d %d %d", c_usage->r_cpu, */
+/* 				     c_usage->i_cpu, c_usage->o_cpu); */
 				c_usage->r_cpu += c_usage->i_cpu;
 				c_usage->o_cpu -= c_usage->i_cpu;
 				c_usage->i_cpu = 0;
+				if(c_usage->r_cpu < 0)
+					c_usage->r_cpu = 0;
 			}
 			
 /* 			info("cluster %s(%d) down %d alloc %d " */
@@ -472,21 +476,11 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			if(query) {
 				xstrfmtcat(query, 
 					   ", (%d, %d, '%s', %d, %d, "
-					   "%d, %d, %d, %d, %d) "
-					   "on duplicate key update "
-					   "mod_time=%d, cpu_count=%d, "
-					   "alloc_cpu_secs=%d, "
-					   "down_cpu_secs=%d, "
-					   "idle_cpu_secs=%d, "
-					   "over_cpu_secs=%d, resv_cpu_secs=%d",
+					   "%llu, %llu, %llu, %llu, %llu)",
 					   now, now, 
 					   c_usage->name, c_usage->start, 
 					   c_usage->cpu_count, c_usage->a_cpu,
 					   c_usage->d_cpu, c_usage->i_cpu,
-					   c_usage->o_cpu, c_usage->r_cpu,
-					   now, 
-					   c_usage->cpu_count, c_usage->a_cpu,
-					   c_usage->d_cpu, c_usage->i_cpu,
 					   c_usage->o_cpu, c_usage->r_cpu); 
 			} else {
 				xstrfmtcat(query, 
@@ -496,25 +490,26 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					   "down_cpu_secs, idle_cpu_secs, "
 					   "over_cpu_secs, resv_cpu_secs) "
 					   "values (%d, %d, '%s', %d, %d, "
-					   "%d, %d, %d, %d, %d) "
-					   "on duplicate key update "
-					   "mod_time=%d, cpu_count=%d, "
-					   "alloc_cpu_secs=%d, "
-					   "down_cpu_secs=%d, "
-					   "idle_cpu_secs=%d, "
-					   "over_cpu_secs=%d, resv_cpu_secs=%d",
+					   "%llu, %llu, %llu, %llu, %llu)",
 					   cluster_hour_table, now, now, 
 					   c_usage->name, c_usage->start, 
-					   c_usage->cpu_count, c_usage->a_cpu,
-					   c_usage->d_cpu, c_usage->i_cpu,
-					   c_usage->o_cpu, c_usage->r_cpu,
-					   now,
-					   c_usage->cpu_count, c_usage->a_cpu,
+					   c_usage->cpu_count,
+					   c_usage->a_cpu,
 					   c_usage->d_cpu, c_usage->i_cpu,
 					   c_usage->o_cpu, c_usage->r_cpu); 
 			}
 		}
+
 		if(query) {
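+			/* One trailing "on duplicate key update" clause using
+			 * VALUES() covers every row of the multi-row insert */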
+			xstrfmtcat(query, 
+				   " on duplicate key update "
+				   "mod_time=%d, cpu_count=VALUES(cpu_count), "
+				   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
+				   "down_cpu_secs=VALUES(down_cpu_secs), "
+				   "idle_cpu_secs=VALUES(idle_cpu_secs), "
+				   "over_cpu_secs=VALUES(over_cpu_secs), "
+				   "resv_cpu_secs=VALUES(resv_cpu_secs)",
+				   now);
 			rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
 			xfree(query);
 			if(rc != SLURM_SUCCESS) {
@@ -530,30 +525,28 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 /* 			     a_usage->a_cpu); */
 			if(query) {
 				xstrfmtcat(query, 
-					   ", (%d, %d, %d, %d, %d, "
-					   "%d, %d, %d, %d) "
-					   "on duplicate key update "
-					   "mod_time=%d, alloc_cpu_secs=%d",
+					   ", (%d, %d, %d, %d, %llu)",
 					   now, now, 
 					   a_usage->assoc_id, curr_start,
-					   a_usage->a_cpu,
-					   now, a_usage->a_cpu); 
+					   a_usage->a_cpu); 
 			} else {
 				xstrfmtcat(query, 
 					   "insert into %s (creation_time, "
 					   "mod_time, id, period_start, "
 					   "alloc_cpu_secs) values "
-					   "(%d, %d, %d, %d, %d) "
-					   "on duplicate key update "
-					   "mod_time=%d, alloc_cpu_secs=%d",
+					   "(%d, %d, %d, %d, %llu)",
 					   assoc_hour_table, now, now, 
 					   a_usage->assoc_id, curr_start,
-					   a_usage->a_cpu,
-					   now, a_usage->a_cpu); 
+					   a_usage->a_cpu); 
 			}
 		}
-		
 		if(query) {
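+			/* Same pattern: append a single ON DUPLICATE KEY UPDATE
+			 * clause using VALUES() for the whole insert */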
+			xstrfmtcat(query, 
+				   " on duplicate key update "
+				   "mod_time=%d, "
+				   "alloc_cpu_secs=VALUES(alloc_cpu_secs)",
+				   now);
+					   	
 			debug3("%d query\n%s", mysql_conn->conn, query);
 			rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
 			xfree(query);
@@ -606,8 +599,8 @@ extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
 
 	while(curr_start < end) {
 		debug3("curr day is now %d-%d", curr_start, curr_end);
-/* 	info("start %s", ctime(&curr_start)); */
-/* 	info("end %s", ctime(&curr_end)); */
+/* 		info("start %s", ctime(&curr_start)); */
+/* 		info("end %s", ctime(&curr_end)); */
 		query = xstrdup_printf(
 			"insert into %s (creation_time, mod_time, id, "
 			"period_start, alloc_cpu_secs) select %d, %d, id, "
@@ -702,8 +695,8 @@ extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
 
 	while(curr_start < end) {
 		debug3("curr month is now %d-%d", curr_start, curr_end);
-/* 	info("start %s", ctime(&curr_start)); */
-/* 	info("end %s", ctime(&curr_end)); */
+/* 		info("start %s", ctime(&curr_start)); */
+/* 		info("end %s", ctime(&curr_end)); */
 		query = xstrdup_printf(
 			"insert into %s (creation_time, mod_time, id, "
 			"period_start, alloc_cpu_secs) select %d, %d, id, "
@@ -756,6 +749,20 @@ extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
 		start_tm.tm_isdst = -1;
 		curr_end = mktime(&start_tm);
 	}
+
+	/* Remove all data from the event table that is older than
+	 * start.
+	 */
+	query = xstrdup_printf("delete from %s where period_end < %d "
+			       "&& period_end != 0",
+			       event_table, start);
+	rc = mysql_db_query(mysql_conn->acct_mysql_db, query);
+	xfree(query);
+	if(rc != SLURM_SUCCESS) {
+		error("Couldn't remove old event data");
+		return SLURM_ERROR;
+	}
+
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/plugins/accounting_storage/none/accounting_storage_none.c b/src/plugins/accounting_storage/none/accounting_storage_none.c
index 0701440e26b77f8f7577cd08981bfe852ce8e520..26a4c554d700cd548302bc18370be1d977fa21d2 100644
--- a/src/plugins/accounting_storage/none/accounting_storage_none.c
+++ b/src/plugins/accounting_storage/none/accounting_storage_none.c
@@ -109,7 +109,7 @@ extern int acct_storage_p_add_users(void *db_conn, uint32_t uid,
 }
 
 extern int acct_storage_p_add_coord(void *db_conn, uint32_t uid,
-				    char *acct, acct_user_cond_t *user_q)
+				    List acct_list, acct_user_cond_t *user_q)
 {
 	return SLURM_SUCCESS;
 }
@@ -167,7 +167,8 @@ extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 }
 
 extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
-				       char *acct, acct_user_cond_t *user_q)
+					List acct_list, 
+					acct_user_cond_t *user_q)
 {
 	return SLURM_SUCCESS;
 }
@@ -326,6 +327,16 @@ extern List jobacct_storage_p_get_jobs(void *db_conn,
 	return NULL;
 }
 
+/*
+ * get info from the storage
+ * returns List of jobacct_job_rec_t *
+ * note: the returned List needs to be freed by the caller
+ */
+extern List jobacct_storage_p_get_jobs_cond(void *db_conn, void *job_cond)
+{
+	return NULL;
+}
+
 /* 
  * expire old info from the storage 
  */
diff --git a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
index 0219e9517cda9b8fb6586246efe41598626ad738..b4a59425612e2399d99ba856702ca6749804b47c 100644
--- a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
+++ b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
@@ -763,7 +763,7 @@ extern int acct_storage_p_add_users(PGconn *acct_pgsql_db, uint32_t uid,
 }
 
 extern int acct_storage_p_add_coord(PGconn *acct_pgsql_db, uint32_t uid,
-				    char *acct, acct_user_cond_t *user_q)
+				    List acct_list, acct_user_cond_t *user_q)
 {
 	return SLURM_SUCCESS;
 }
@@ -822,7 +822,8 @@ extern List acct_storage_p_remove_users(PGconn *acct_pgsql_db, uint32_t uid,
 }
 
 extern List acct_storage_p_remove_coord(PGconn *acct_pgsql_db, uint32_t uid,
-					   char *acct, acct_user_cond_t *user_q)
+					List acct_list,
+					acct_user_cond_t *user_q)
 {
 	return SLURM_SUCCESS;
 }
@@ -1142,7 +1143,7 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 		query = xstrdup_printf(
 			"update %s set partition='%s', blockid='%s', start=%d, "
 			"name='%s', state=%u, alloc_cpus=%u, nodelist='%s', "
-			"account='%s' where id=%d",
+			"account='%s', end=0 where id=%d",
 			job_table, job_ptr->partition, block_id,
 			(int)job_ptr->start_time,
 			jname, 
@@ -1296,9 +1297,9 @@ extern int jobacct_storage_p_step_start(PGconn *acct_pgsql_db,
 	/* we want to print a -1 for the requid so leave it a
 	   %d */
 	query = xstrdup_printf(
-		"insert into %s (id, stepid, start, name, state, "
+		"insert into %s (id, stepid, start, end, name, state, "
 		"cpus, nodelist) "
-		"values (%d, %u, %u, '%s', %d, %u, '%s')",
+		"values (%d, %u, %u, 0, '%s', %d, %u, '%s')",
 		step_table, step_ptr->job_ptr->db_index,
 		step_ptr->step_id, 
 		(int)step_ptr->start_time, step_ptr->name,
@@ -1509,20 +1510,60 @@ extern int jobacct_storage_p_suspend(PGconn *acct_pgsql_db,
 extern List jobacct_storage_p_get_jobs(PGconn *acct_pgsql_db,
 				       List selected_steps,
 				       List selected_parts,
-				       void *params)
+				       sacct_parameters_t *params)
 {
 	List job_list = NULL;
 #ifdef HAVE_PGSQL
+	acct_job_cond_t job_cond;
+	struct passwd *pw = NULL;
+
+	if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) {
+		if(!pgsql_get_db_connection(&acct_pgsql_db,
+					    pgsql_db_name, pgsql_db_info))
+			return job_list;
+	}
+
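+	/* Build an acct_job_cond_t from the sacct-style parameters */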
+	memset(&job_cond, 0, sizeof(acct_job_cond_t));
+
+	job_cond.step_list = selected_steps;
+	job_cond.partition_list = selected_parts;
+	if(params->opt_cluster) {
+		job_cond.cluster_list = list_create(NULL);
+		list_append(job_cond.cluster_list, params->opt_cluster);
+	}
+
+	if (params->opt_uid >=0 && (pw=getpwuid(params->opt_uid))) {
+		job_cond.user_list = list_create(NULL);
+		list_append(job_cond.user_list, pw->pw_name);
+	}	
+
+	job_list = pgsql_jobacct_process_get_jobs(acct_pgsql_db, &job_cond);	
+
+	if(job_cond.user_list)
+		list_destroy(job_cond.user_list);
+	if(job_cond.cluster_list)
+		list_destroy(job_cond.cluster_list);
+#endif
+	return job_list;
+}
+
+/*
+ * get info from the storage
+ * returns List of jobacct_job_rec_t *
+ * note: the returned List needs to be freed by the caller
+ */
+extern List jobacct_storage_p_get_jobs_cond(PGconn *acct_pgsql_db, 
+					    acct_job_cond_t *job_cond)
+{
+	List job_list = NULL;
+#ifdef HAVE_PGSQL
 	if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) {
 		if(!pgsql_get_db_connection(&acct_pgsql_db,
 					    pgsql_db_name, pgsql_db_info))
 			return job_list;
 	}
 
-	job_list = pgsql_jobacct_process_get_jobs(acct_pgsql_db,
-						  selected_steps, 
-						  selected_parts,
-						  params);
+	job_list = pgsql_jobacct_process_get_jobs(acct_pgsql_db, job_cond);	
 #endif
 	return job_list;
 }
diff --git a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
index c1ff95fb8f8046343f1c1356f5d1835a9a764ee6..94e97377b535b0848f91bd91781554aa1dc3d9ae 100644
--- a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
+++ b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
@@ -44,25 +44,19 @@
 #include "pgsql_jobacct_process.h"
 
 #ifdef HAVE_PGSQL
-static void _do_fdump(List job_list)
-{
-	info("fdump option not applicable from pgsql plugin");
-	return;
-}
 
 extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
-					   List selected_steps,
-					   List selected_parts,
-					   sacct_parameters_t *params)
+					   acct_job_cond_t *job_cond)
 {
 
 	char *query = NULL;	
 	char *extra = NULL;	
 	char *tmp = NULL;	
-	char *selected_part = NULL;
+	char *object = NULL;
 	jobacct_selected_step_t *selected_step = NULL;
 	ListIterator itr = NULL;
 	int set = 0;
+	char *table_level="t2";
 	PGresult *result = NULL, *step_result = NULL;
 	int i, j;
 	jobacct_job_rec_t *job = NULL;
@@ -96,6 +90,9 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 		"t1.nodelist",
 		"t1.kill_requid",
 		"t1.qos",
+		"t2.user_name",
+		"t2.cluster",
+		"t2.lft"
 	};
 
 	/* if this changes you will need to edit the corresponding 
@@ -157,6 +154,9 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 		JOB_REQ_NODELIST,
 		JOB_REQ_KILL_REQUID,
 		JOB_REQ_QOS,
+		JOB_REQ_USER_NAME,
+		JOB_REQ_CLUSTER,
+		JOB_REQ_LFT,
 		JOB_REQ_COUNT		
 	};
 	enum {
@@ -193,56 +193,176 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 		STEP_REQ_COUNT
 	};
 
-	if(selected_steps && list_count(selected_steps)) {
+	if(!job_cond)
+		goto no_cond;
+
+	/* THIS ASSOCID CHECK ALWAYS NEEDS TO BE FIRST!!!!!!! */
+	if(job_cond->associd_list && list_count(job_cond->associd_list)) {
 		set = 0;
-		xstrcat(extra, " and (");
-		itr = list_iterator_create(selected_steps);
-		while((selected_step = list_next(itr))) {
+		xstrfmtcat(extra, ", %s as t3 where (");
+		itr = list_iterator_create(job_cond->associd_list);
+		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " or ");
-			tmp = xstrdup_printf("t1.jobid=%u",
-					      selected_step->jobid);
-			xstrcat(extra, tmp);
+			xstrfmtcat(extra, "t1.associd=%s", object);
 			set = 1;
-			xfree(tmp);
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
+		table_level="t3";
+		/* just in case the association is gone */
+		if(set) 
+			xstrcat(extra, " or ");
+		xstrfmtcat(extra, "t3.id is null) and "
+			   "(t2.lft between t3.lft and t3.rgt "
+			   "or t2.lft is null)");
 	}
 
-	if(selected_parts && list_count(selected_parts)) {
+	if(job_cond->acct_list && list_count(job_cond->acct_list)) {
 		set = 0;
-		xstrcat(extra, " and (");
-		itr = list_iterator_create(selected_parts);
-		while((selected_part = list_next(itr))) {
+		if(extra)
+			xstrcat(extra, " and (");
+		else
+			xstrcat(extra, " where (");
+		itr = list_iterator_create(job_cond->acct_list);
+		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " or ");
-			tmp = xstrdup_printf("t1.partition='%s'",
-					      selected_part);
-			xstrcat(extra, tmp);
+			xstrfmtcat(extra, "t1.acct='%s'", object);
 			set = 1;
-			xfree(tmp);
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
-	
-	for(i=0; i<JOB_REQ_COUNT; i++) {
-		if(i) 
-			xstrcat(tmp, ", ");
-		xstrcat(tmp, job_req_inx[i]);
+
+	if(job_cond->groupid_list && list_count(job_cond->groupid_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " and (");
+		else
+			xstrcat(extra, " where (");
+		itr = list_iterator_create(job_cond->groupid_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " or ");
+			xstrfmtcat(extra, "t1.gid=", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
+	if(job_cond->partition_list && list_count(job_cond->partition_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " and (");
+		else
+			xstrcat(extra, " where (");
+		itr = list_iterator_create(job_cond->partition_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " or ");
+			xstrfmtcat(extra, "t1.partition='%s'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
+	if(job_cond->step_list && list_count(job_cond->step_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " and (");
+		else
+			xstrcat(extra, " where (");
+		itr = list_iterator_create(job_cond->step_list);
+		while((selected_step = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " or ");
+			xstrfmtcat(extra, "t1.jobid=%u", selected_step->jobid);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
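+	/* Only return jobs that overlap the requested time window */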
+	if(job_cond->usage_start) {
+		if(!job_cond->usage_end)
+			job_cond->usage_end = time(NULL);
+
+		if(extra)
+			xstrcat(extra, " and (");
+		else
+			xstrcat(extra, " where (");
+		xstrfmtcat(extra, 
+			   "(t1.eligible < %d and (endtime >= %d "
+			   "or endtime = 0)))",
+			   job_cond->usage_end, job_cond->usage_start);
+	}
+
+	/* all of the association (t2) conditions need to be grouped here */
+	if(job_cond->cluster_list && list_count(job_cond->cluster_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " and (");
+		else
+			xstrcat(extra, " where (");
+
+		itr = list_iterator_create(job_cond->cluster_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " or ");
+			xstrfmtcat(extra, "%s.cluster='%s'", 
+				   table_level, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		/* just in case the association is gone */
+		if(set) 
+			xstrcat(extra, " or ");
+		xstrfmtcat(extra, "%s.cluster is null)", table_level);
+	}
+
+	if(job_cond->user_list && list_count(job_cond->user_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " and (");
+		else
+			xstrcat(extra, " where (");
+
+		itr = list_iterator_create(job_cond->user_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " or ");
+			xstrfmtcat(extra, "%s.user_name='%s'",
+				   table_level, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		/* just in case the association is gone */
+		if(set) 
+			xstrcat(extra, " or ");
+		xstrfmtcat(extra, "%s.user_name is null)", table_level);
+	}
+
+no_cond:	
+
+	xfree(tmp);
+	xstrfmtcat(tmp, "%s", job_req_inx[0]);
+	for(i=1; i<JOB_REQ_COUNT; i++) {
+		xstrfmtcat(tmp, ", %s", job_req_inx[i]);
 	}
 	
-	query = xstrdup_printf("select %s from %s t1",
-			       tmp, job_table);
+	query = xstrdup_printf("select %s from %s as t1 left join %s as t2 "
+			       "on t1.associd=t2.id",
+			       tmp, job_table, assoc_table);
 	xfree(tmp);
-
 	if(extra) {
 		xstrcat(query, extra);
 		xfree(extra);
 	}
 
-	//info("query = %s", query);
+	debug3("query\n%s", query);
 	if(!(result = pgsql_db_query_ret(acct_pgsql_db, query))) {
 		xfree(query);
 		list_destroy(job_list);
@@ -252,35 +372,33 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 
 	for (i = 0; i < PQntuples(result); i++) {
 		char *id = PQgetvalue(result, i, JOB_REQ_ID);
-		acct_association_rec_t account_rec;
-		memset(&account_rec, 0, sizeof(acct_association_rec_t));
+
 		job = create_jobacct_job_rec();
 
 		job->alloc_cpus = atoi(PQgetvalue(result, i, 
 						  JOB_REQ_ALLOC_CPUS));
 		job->associd = atoi(PQgetvalue(result, i, JOB_REQ_ASSOCID));
-		account_rec.id = job->associd;
-		assoc_mgr_fill_in_assoc(acct_pgsql_db, &account_rec, 0, NULL);
-		if(account_rec.cluster) {
-			if(params->opt_cluster &&
-			   strcmp(params->opt_cluster, account_rec.cluster)) {
-				destroy_jobacct_job_rec(job);
-				job = NULL;
-				continue;
-			}
-			job->cluster = xstrdup(account_rec.cluster);
-		}
-		if(account_rec.user) 
-			job->user = xstrdup(account_rec.user);
-		else 
+		job->cluster = xstrdup(PQgetvalue(result, i, JOB_REQ_CLUSTER));
+		if(job->cluster && !job->cluster[0]) 
+			xfree(job->cluster);
+
+		job->user =  xstrdup(PQgetvalue(result, i, JOB_REQ_USER_NAME));
+		if(!job->user || !job->user[0]) 
 			job->uid = atoi(PQgetvalue(result, i, JOB_REQ_UID));
-		if(account_rec.acct) 
-			job->account = xstrdup(account_rec.acct);
-		else
-			job->account = xstrdup(PQgetvalue(result, i,
-							  JOB_REQ_ACCOUNT));
+
+		job->lft = atoi(PQgetvalue(result, i, JOB_REQ_LFT));
+
+		if(!job->lft)
+			job->lft = (uint32_t)NO_VAL;
+
+		job->account = xstrdup(PQgetvalue(result, i, JOB_REQ_ACCOUNT));
+		if(job->account && !job->account[0]) 
+			xfree(job->account);
+
 		job->blockid = xstrdup(PQgetvalue(result, i,
-						    JOB_REQ_BLOCKID));
+						  JOB_REQ_BLOCKID));
+		if(!job->blockid || !job->blockid[0]) 
+			xfree(job->blockid);
 		job->eligible = atoi(PQgetvalue(result, i, JOB_REQ_SUBMIT));
 		job->submit = atoi(PQgetvalue(result, i, JOB_REQ_SUBMIT));
 		job->start = atoi(PQgetvalue(result, i, JOB_REQ_START));
@@ -316,9 +434,10 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 
 		list_append(job_list, job);
 
-		if(selected_steps && list_count(selected_steps)) {
+		if(job_cond && job_cond->step_list && 
+		   list_count(job_cond->step_list)) {
 			set = 0;
-			itr = list_iterator_create(selected_steps);
+			itr = list_iterator_create(job_cond->step_list);
 			while((selected_step = list_next(itr))) {
 				if(selected_step->jobid != job->jobid) {
 					continue;
@@ -333,11 +452,9 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 				else 
 					xstrcat(extra, " and (");
 			
-				tmp = xstrdup_printf("t1.stepid=%u",
-						     selected_step->stepid);
-				xstrcat(extra, tmp);
+				xstrfmtcat(extra, "t1.stepid=%u",
+					   selected_step->stepid);
 				set = 1;
-				xfree(tmp);
 				job->show_full = 0;
 			}
 			list_iterator_destroy(itr);
@@ -382,7 +499,7 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 			step->ncpus = atoi(
 				PQgetvalue(step_result, j, STEP_REQ_CPUS));
 			step->start = atoi(
-				PQgetvalue(step_result, j, JOB_REQ_START));
+				PQgetvalue(step_result, j, STEP_REQ_START));
 			step->end = atoi(
 				PQgetvalue(step_result, j, STEP_REQ_ENDTIME));
 			/* figure this out by start stop */
@@ -475,9 +592,6 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 	}
 	PQclear(result);
 	
-	if (params && params->opt_fdump) 
-		_do_fdump(job_list);
-	
 	return job_list;
 }
 
diff --git a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h
index 255f1a9d78a35770f0e8d18634212f3a4a389e98..19575d88b4eb473faec6f15adb4c14a40ef12c4b 100644
--- a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h
+++ b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h
@@ -55,13 +55,12 @@
 
 #ifdef HAVE_PGSQL
 
+extern char *assoc_table;
 extern char *job_table;
 extern char *step_table;
 
 extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
-					   List selected_steps,
-					   List selected_parts,
-					   sacct_parameters_t *params);
+					   acct_job_cond_t *job_cond);
 
 extern void pgsql_jobacct_process_archive(PGconn *acct_pgsql_db,
 					  List selected_parts,
diff --git a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
index 3dd41ad223dd35fdee5027640a23c45642f576b5..cbfba517fce818563e7e7f3beac6b7b935697690 100644
--- a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
+++ b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
@@ -183,13 +183,13 @@ extern int acct_storage_p_add_users(void *db_conn, uint32_t uid, List user_list)
 }
 
 extern int acct_storage_p_add_coord(void *db_conn, uint32_t uid,
-				    char *acct, acct_user_cond_t *user_q)
+				    List acct_list, acct_user_cond_t *user_q)
 {
 	slurmdbd_msg_t req;
 	dbd_acct_coord_msg_t get_msg;
 	int rc, resp_code;
 
-	get_msg.acct = acct;
+	get_msg.acct_list = acct_list;
 	get_msg.cond = user_q;
 
 	req.msg_type = DBD_ADD_ACCOUNT_COORDS;
@@ -277,7 +277,15 @@ extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_MODIFY_USERS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -309,7 +317,15 @@ extern List acct_storage_p_modify_accts(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_MODIFY_ACCOUNTS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -343,7 +359,15 @@ extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_MODIFY_CLUSTERS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -377,7 +401,15 @@ extern List acct_storage_p_modify_associations(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_MODIFY_ASSOCS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -409,7 +441,15 @@ extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_USERS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -423,7 +463,8 @@ extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 }
 
 extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
-				       char *acct, acct_user_cond_t *user_q)
+					List acct_list,
+					acct_user_cond_t *user_q)
 {
 	slurmdbd_msg_t req;
 	dbd_acct_coord_msg_t get_msg;
@@ -433,7 +474,7 @@ extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
 	List ret_list = NULL;
 
 
-	get_msg.acct = acct;
+	get_msg.acct_list = acct_list;
 	get_msg.cond = user_q;
 
 	req.msg_type = DBD_REMOVE_ACCOUNT_COORDS;
@@ -442,7 +483,15 @@ extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_ACCOUNT_COORDS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -474,7 +523,15 @@ extern List acct_storage_p_remove_accts(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_ACCTS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -506,7 +563,15 @@ extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_CLUSTERS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -538,7 +603,15 @@ extern List acct_storage_p_remove_associations(void *db_conn, uint32_t uid,
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_ASSOCS failure: %m");
-	else if (resp.msg_type != DBD_GOT_LIST) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
 	} else {
@@ -794,7 +867,8 @@ extern int clusteracct_storage_p_cluster_procs(void *db_conn,
 {
 	slurmdbd_msg_t msg;
 	dbd_cluster_procs_msg_t req;
-	info("sending info for cluster %s", cluster);
+
+	debug2("Sending info for cluster %s", cluster);
 	req.cluster_name = cluster;
 	req.proc_count   = procs;
 	req.event_time   = event_time;
@@ -812,7 +886,8 @@ extern int clusteracct_storage_p_register_ctld(char *cluster,
 {
 	slurmdbd_msg_t msg;
 	dbd_register_ctld_msg_t req;
-	info("registering slurmctld for cluster %s at port %u", cluster, port);
+	info("Registering slurmctld for cluster %s at port %u with slurmdbd.",
+	     cluster, port);
 	req.cluster_name = cluster;
 	req.port         = port;
 	msg.msg_type     = DBD_REGISTER_CTLD;
@@ -1093,6 +1168,7 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 	req.assoc_id    = step_ptr->job_ptr->assoc_id;
 	req.db_index    = step_ptr->job_ptr->db_index;
 	req.end_time    = time(NULL);	/* called at step completion */
+	req.exit_code   = step_ptr->exit_code;
 	req.jobacct     = step_ptr->jobacct;
 	req.job_id      = step_ptr->job_ptr->job_id;
 	req.req_uid     = step_ptr->job_ptr->requid;
@@ -1182,6 +1258,41 @@ extern List jobacct_storage_p_get_jobs(void *db_conn,
 	return job_list;
 }
 
+/* 
+ * get info from the storage 
+ * returns List of job_rec_t *
+ * note: the returned List must be freed by the caller
+ */
+extern List jobacct_storage_p_get_jobs_cond(void *db_conn,
+					    acct_job_cond_t *job_cond)
+{
+	slurmdbd_msg_t req, resp;
+	dbd_cond_msg_t get_msg;
+	dbd_list_msg_t *got_msg;
+	int rc;
+	List job_list = NULL;
+		
+	get_msg.cond = job_cond;
+
+	req.msg_type = DBD_GET_JOBS_COND;
+	req.data = &get_msg;
+	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+
+	if (rc != SLURM_SUCCESS)
+		error("slurmdbd: DBD_GET_JOBS_COND failure: %m");
+	else if (resp.msg_type != DBD_GOT_JOBS) {
+		error("slurmdbd: response type not DBD_GOT_JOBS: %u", 
+		      resp.msg_type);
+	} else {
+		got_msg = (dbd_list_msg_t *) resp.data;
+		job_list = got_msg->my_list;
+		got_msg->my_list = NULL;
+		slurmdbd_free_list_msg(got_msg);
+	}
+
+	return job_list;
+}
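+
+/*
+ * Illustrative sketch only (not an existing caller): one way the new
+ * conditional lookup above might be driven.  The type, field and helper
+ * names are those used in this patch; the one-hour window is made up.
+ *
+ *	acct_job_cond_t job_cond;
+ *	List jobs = NULL;
+ *
+ *	memset(&job_cond, 0, sizeof(acct_job_cond_t));
+ *	job_cond.usage_start = time(NULL) - 3600;
+ *	jobs = jobacct_storage_p_get_jobs_cond(db_conn, &job_cond);
+ *	if (jobs)
+ *		list_destroy(jobs);	(the returned List must be freed)
+ */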
+
 /* 
  * Expire old info from the storage
  * Not applicable for any database
diff --git a/src/plugins/auth/munge/auth_munge.c b/src/plugins/auth/munge/auth_munge.c
index 1a5a7f51754b5f9891b77923b3b555fcb393d869..c460bf6816e62cfb78a4f8f5a6c853768a3d703e 100644
--- a/src/plugins/auth/munge/auth_munge.c
+++ b/src/plugins/auth/munge/auth_munge.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  auth_munge.c - SLURM auth implementation via Chris Dunlap's Munge
- *  $Id: auth_munge.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: auth_munge.c 14319 2008-06-23 21:28:52Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -73,7 +73,7 @@
 
 #define MUNGE_ERRNO_OFFSET	1000
 
-const char plugin_name[]       	= "auth plugin for Munge (Chris Dunlap, LLNL)";
+const char plugin_name[]       	= "auth plugin for Munge (http://home.gna.org/munge/)";
 const char plugin_type[]       	= "auth/munge";
 const uint32_t plugin_version	= 10;
 
diff --git a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
index dc0b0ca354b12b0e73cfd8d4b4a3bbbd53b8a117..2abb0afd2fa6f88ef46583d33bf6d14ded1f1bf0 100644
--- a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
+++ b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
@@ -519,7 +519,10 @@ extern struct jobacctinfo *jobacct_gather_p_stat_task(pid_t pid)
 #ifdef HAVE_AIX
 	_get_process_data();
 #endif
-	return jobacct_common_stat_task(pid);
+	if(pid)
+		return jobacct_common_stat_task(pid);
+	else
+		return NULL;
 }
 
 extern struct jobacctinfo *jobacct_gather_p_remove_task(pid_t pid)
diff --git a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
index d8f19f1ff84f5f2509b717d58bf2875274c0defa..ba5abdc5080e2641007f71e317ca9a6f5f68b6c5 100644
--- a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
+++ b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
@@ -296,7 +296,7 @@ static void _get_process_data() {
 		slurm_mutex_unlock(&reading_mutex);
 	
 	}
-		
+
 	if (!list_count(prec_list)) {
 		goto finished;	/* We have no business being here! */
 	}
@@ -561,6 +561,8 @@ extern int jobacct_gather_p_startpoll(uint16_t frequency)
 	debug("jobacct-gather: frequency = %d", frequency);
 		
 	jobacct_shutdown = false;
+
+	task_list = list_create(jobacct_common_free_jobacct);
 	
 	if (frequency == 0) {	/* don't want dynamic monitoring? */
 		debug2("jobacct-gather LINUX dynamic logging disabled");
@@ -568,8 +570,6 @@ extern int jobacct_gather_p_startpoll(uint16_t frequency)
 	}
 
 	freq = frequency;
-	task_list = list_create(jobacct_common_free_jobacct);
-	
 	/* create polling thread */
 	slurm_attr_init(&attr);
 	if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
@@ -639,7 +639,10 @@ extern int jobacct_gather_p_add_task(pid_t pid, jobacct_id_t *jobacct_id)
 extern struct jobacctinfo *jobacct_gather_p_stat_task(pid_t pid)
 {
 	_get_process_data();
-	return jobacct_common_stat_task(pid);
+	if(pid)
+		return jobacct_common_stat_task(pid);
+	else
+		return NULL;
 }
 
 extern struct jobacctinfo *jobacct_gather_p_remove_task(pid_t pid)
diff --git a/src/plugins/jobcomp/script/jobcomp_script.c b/src/plugins/jobcomp/script/jobcomp_script.c
index 660074beae09e809e7724aa6a2fb7bbfb9ff95ab..503da6ce607846c50a4592dbbf3b287c8475db29 100644
--- a/src/plugins/jobcomp/script/jobcomp_script.c
+++ b/src/plugins/jobcomp/script/jobcomp_script.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  jobcomp_script.c - Script running slurm job completion logging plugin.
- *  $Id: jobcomp_script.c 14054 2008-05-14 17:06:31Z da $
+ *  $Id: jobcomp_script.c 14242 2008-06-11 23:29:49Z jette $
  *****************************************************************************
  *  Produced at Center for High Performance Computing, North Dakota State
  *  University
@@ -285,6 +285,7 @@ static int _env_append_fmt (char ***envp, const char *name,
 static char ** _create_environment (struct jobcomp_info *job)
 {
 	char **env;
+	char *tz;
 
 	env = xmalloc (1 * sizeof (*env));
 	env[0] = NULL;
@@ -310,6 +311,8 @@ static char ** _create_environment (struct jobcomp_info *job)
 	else 
 		_env_append_fmt (&env, "LIMIT", "%lu", job->limit);
 
+	if ((tz = getenv ("TZ")))
+		_env_append_fmt (&env, "TZ", "%s", tz);
 #ifdef _PATH_STDPATH
 	_env_append (&env, "PATH", _PATH_STDPATH);
 #else
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index 008607474f7c7b53963d6b044ceaea5c8e858943..207c906a7fcba3eb81883e9c71ae88ea695c458a 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -58,6 +58,7 @@
 
 #include "slurm/slurm.h"
 #include "slurm/slurm_errno.h"
+
 #include "src/common/list.h"
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
@@ -65,6 +66,8 @@
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
+
+#include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
@@ -244,6 +247,8 @@ static void _attempt_backfill(void)
 		if ((part_ptr->root_only) && filter_root)
 			continue;
 
+		if (!acct_policy_job_runnable(job_ptr))
+			continue;
 		if (license_job_test(job_ptr) != SLURM_SUCCESS)
 			continue;
 		if (job_independent(job_ptr) != true)
@@ -277,6 +282,7 @@ static void _attempt_backfill(void)
 		/* Identify usable nodes for this job */
 		FREE_NULL_BITMAP(avail_bitmap);
 		avail_bitmap = bit_copy(part_ptr->node_bitmap);
+		bit_and(avail_bitmap, up_node_bitmap);
 		for (j=0; ; ) {
 			if (node_space[j].end_time <= end_time) {
 				bit_and(avail_bitmap, 
diff --git a/src/plugins/sched/wiki/get_jobs.c b/src/plugins/sched/wiki/get_jobs.c
index e638098a1f130f3f149411b8197b138eadb86134..46dbce9a1cc68de0b99b919d90e49a66db39a0d5 100644
--- a/src/plugins/sched/wiki/get_jobs.c
+++ b/src/plugins/sched/wiki/get_jobs.c
@@ -71,25 +71,30 @@ static char *	_task_list(struct job_record *job_ptr);
  *
  * Response format
  * ARG=<cnt>#<JOBID>;
- *	STATE=<state>;
- *	[HOSTLIST=<required_hosts>;]
- *	[TASKLIST=<allocated_hosts>;]
- *	[REJMESSAGE=<reason_job_failed>;]
- *	UPDATE_TIME=<uts>;
- *	WCLIMIT=<time_limit>;
- *	[TASKS=<required_cpus>;]
- *	[NODES=<required_node_cnt>;]
- *	QUEUETIME=<submit_time>;
- *	STARTTIME=<time>;
- *	PARTITIONMASK=<partition>;
- *	RMEM=<mem_size>;
- *	RDISK=<disk_space>;
- *	[COMPLETETIME=<end_time>;]
- *	[SUSPENDTIME=<time_suspended>;]
- *	[UNAME=<user>;]
- *	[GNAME=<group>;]
- *  [#<JOBID>;...];
- *
+ *	STATE=<state>;			Moab equivalent job state
+ *	[HOSTLIST=<node1:node2>;]	list of required nodes, if any
+ *	[TASKLIST=<node1:node2>;]	nodes in use, if running or completing
+ *	[RFEATURES=<features>;]		required features, if any, 
+ *					NOTE: OR operator not supported
+ *	[REJMESSAGE=<str>;]		reason job is not running, if any
+ *	UPDATETIME=<uts>;		time last active
+ *	WCLIMIT=<secs>;			wall clock time limit, seconds
+ *	TASKS=<cpus>;			CPUs required
+ *	[NODES=<nodes>;]		count of nodes required
+ *	DPROCS=<cpus_per_task>;		count of CPUs required per task
+ *	QUEUETIME=<uts>;		submission time
+ *	STARTTIME=<uts>;		time execution started
+ *	PARTITIONMASK=<partition>;	partition name
+ *	RMEM=<MB>;			MB of memory required
+ *	RDISK=<MB>;			MB of disk space required
+ *	[COMPLETETIME=<uts>;]		termination time
+ *	[SUSPENDTIME=<secs>;]		seconds that job has been suspended
+ *	[QOS=<quality_of_service>;]	quality of service
+ *	[ACCOUNT=<bank_account>;]	bank account name
+ *	[COMMENT=<whatever>;]		job dependency or account number
+ *	UNAME=<user_name>;		user name
+ *	GNAME=<group_name>;		group name
+ * [#<JOBID>;...];			additional jobs, if any
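+ *
+ * Purely illustrative example of a response carrying one running job
+ * (the job id, times, node and user names below are hypothetical; it is
+ * shown only to make the field layout above concrete):
+ * ARG=1#1234;STATE=Running;TASKLIST=bgl000:bgl001;UPDATETIME=1214265600;
+ *	WCLIMIT=3600;TASKS=4;NODES=2;DPROCS=2;QUEUETIME=1214262000;
+ *	STARTTIME=1214262060;PARTITIONMASK=debug;RMEM=1024;RDISK=2048;
+ *	UNAME=jdoe;GNAME=users;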
  */
 extern int	get_jobs(char *cmd_ptr, int *err_code, char **err_msg)
 {
diff --git a/src/plugins/sched/wiki/get_nodes.c b/src/plugins/sched/wiki/get_nodes.c
index 7c51c106db97c2f4cf43f17b4cbed4073e65aa3e..02f5b4dd4f27dfc28c3ab999c306b5cc9efe44e6 100644
--- a/src/plugins/sched/wiki/get_nodes.c
+++ b/src/plugins/sched/wiki/get_nodes.c
@@ -1,7 +1,8 @@
 /*****************************************************************************\
  *  get_nodes.c - Process Wiki get node info request
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
+ *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  LLNL-CODE-402394.
@@ -57,7 +58,7 @@ static char *	_get_node_state(struct node_record *node_ptr);
  *	[OS=<operating_system>;] Operating system
  *	CMEMORY=<MB>;		 MB of memory on node
  *	CDISK=<MB>;		 MB of disk space on node
- *	CPROCS=<cpus>;		 CPU count on node
+ *	CPROC=<cpus>;		 CPU count on node
  *	[FEATURE=<feature>;]	 Features associated with node, if any
  *  [#<NODEID>:...];
  */
@@ -207,9 +208,22 @@ static char *	_dump_node(struct node_record *node_ptr, time_t update_time)
 
 static char *	_get_node_state(struct node_record *node_ptr)
 {
+	static bool got_select_type = false;
+	static bool node_allocations;
 	uint16_t state = node_ptr->node_state;
 	uint16_t base_state = state & NODE_STATE_BASE;
 
+	if (!got_select_type) {
+		char * select_type = slurm_get_select_type();
+		if (select_type && 
+		    (strcasecmp(select_type, "select/linear") == 0))
+			node_allocations = true;
+		else
+			node_allocations = false;
+		xfree(select_type);
+		got_select_type = true;
+	}
+
 	if ((state & NODE_STATE_DRAIN)
 	||  (state & NODE_STATE_FAIL))
 		return "Draining";
@@ -218,8 +232,12 @@ static char *	_get_node_state(struct node_record *node_ptr)
 
 	if (base_state == NODE_STATE_DOWN)
 		return "Down";
-	if (base_state == NODE_STATE_ALLOCATED)
-		return "Running";
+	if (base_state == NODE_STATE_ALLOCATED) {
+		if (node_allocations)
+			return "Busy";
+		else
+			return "Running";
+	}
 	if (base_state == NODE_STATE_IDLE)
 		return "Idle";
 	
diff --git a/src/plugins/sched/wiki/msg.h b/src/plugins/sched/wiki/msg.h
index 7964a09a7b34fe36f98e0a15c62a51e040cc94a9..d01f20d0019941ac330e90622f949193ed20eebb 100644
--- a/src/plugins/sched/wiki/msg.h
+++ b/src/plugins/sched/wiki/msg.h
@@ -63,6 +63,7 @@
 #include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <strings.h>
 #include <unistd.h>
 #include <sys/stat.h>
 #include <sys/types.h>
diff --git a/src/plugins/sched/wiki2/get_jobs.c b/src/plugins/sched/wiki2/get_jobs.c
index b95a9fcc9d68948c2097d2c96ab3dd3af6140991..ed5d46d0601ca2657c3f6d03865596c82f18615c 100644
--- a/src/plugins/sched/wiki2/get_jobs.c
+++ b/src/plugins/sched/wiki2/get_jobs.c
@@ -99,7 +99,8 @@ reject_msg_t reject_msgs[REJECT_MSG_MAX];
  *	[FLAGS=INTERACTIVE;]		set if interactive (not batch) job
  *	WCLIMIT=<secs>;			wall clock time limit, seconds
  *	TASKS=<cpus>;			CPUs required
- *	NODES=<nodes>;			nodes required
+ *	NODES=<nodes>;			count of nodes required
+ *	DPROCS=<cpus_per_task>;		count of CPUs required per task
  *	QUEUETIME=<uts>;		submission time
  *	STARTTIME=<uts>;		time execution started
  *	RCLASS=<partition>;		SLURM partition name
diff --git a/src/plugins/sched/wiki2/get_nodes.c b/src/plugins/sched/wiki2/get_nodes.c
index 34450f6746a4a318927ad19497c9d41c2e84be0e..58f3a00acc6da7abeea3bf168662f6aada4fddd1 100644
--- a/src/plugins/sched/wiki2/get_nodes.c
+++ b/src/plugins/sched/wiki2/get_nodes.c
@@ -1,7 +1,8 @@
 /*****************************************************************************\
  *  get_nodes.c - Process Wiki get node info request
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
+ *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  LLNL-CODE-402394.
@@ -68,7 +69,7 @@ static int	_str_cmp(char *s1, char *s2);
  *	[OS=<operating_system>;] Operating system
  *	CMEMORY=<MB>;		 MB of memory on node
  *	CDISK=<MB>;		 MB of disk space on node
- *	CPROCS=<cpus>;		 CPU count on node
+ *	CPROC=<cpus>;		 CPU count on node
  *	[FEATURE=<feature>;]	 Features associated with node, if any
  *  [#<NODEID>:...];
  */
@@ -357,9 +358,22 @@ static char *	_dump_node(struct node_record *node_ptr, hostlist_t hl,
 
 static char *	_get_node_state(struct node_record *node_ptr)
 {
+	static bool got_select_type = false;
+	static bool node_allocations;
 	uint16_t state = node_ptr->node_state;
 	uint16_t base_state = state & NODE_STATE_BASE;
 
+	if (!got_select_type) {
+		char * select_type = slurm_get_select_type();
+		if (select_type && 
+		    (strcasecmp(select_type, "select/linear") == 0))
+			node_allocations = true;
+		else
+			node_allocations = false;
+		xfree(select_type);
+		got_select_type = true;
+	}
+
 	if ((state & NODE_STATE_DRAIN)
 	||  (state & NODE_STATE_FAIL))
 		return "Draining";
@@ -368,8 +382,12 @@ static char *	_get_node_state(struct node_record *node_ptr)
 
 	if (base_state == NODE_STATE_DOWN)
 		return "Down";
-	if (base_state == NODE_STATE_ALLOCATED)
-		return "Running";
+	if (base_state == NODE_STATE_ALLOCATED) {
+		if (node_allocations)
+			return "Busy";
+		else
+			return "Running";
+	}
 	if (base_state == NODE_STATE_IDLE)
 		return "Idle";
 	
diff --git a/src/plugins/sched/wiki2/msg.c b/src/plugins/sched/wiki2/msg.c
index 50790b3c90929ecbb8a1d7c7efd970fd12662e8d..01cd635c6cf3f9792582e5a910184e88b5f39879 100644
--- a/src/plugins/sched/wiki2/msg.c
+++ b/src/plugins/sched/wiki2/msg.c
@@ -273,7 +273,7 @@ extern int parse_wiki_config(void)
 
 	wiki_conf = _get_wiki_conf_path();
 	if ((wiki_conf == NULL) || (stat(wiki_conf, &buf) == -1)) {
-		debug("No wiki.conf file (%s)", wiki_conf);
+		fatal("No wiki.conf file (%s)", wiki_conf);
 		xfree(wiki_conf);
 		return SLURM_SUCCESS;
 	}
@@ -284,7 +284,7 @@ extern int parse_wiki_config(void)
 		fatal("something wrong with opening/reading wiki.conf file");
 
 	if (! s_p_get_string(&key, "AuthKey", tbl))
-		debug("Warning: No wiki_conf AuthKey specified");
+		fatal("No wiki_conf AuthKey specified");
 	else {
 		strncpy(auth_key, key, sizeof(auth_key));
 		xfree(key);
diff --git a/src/plugins/sched/wiki2/msg.h b/src/plugins/sched/wiki2/msg.h
index 6b58dcf2307133c1be3bbf1d9608338772556e5b..b9d22f64008cca950e43de2c241f25a7eefa01ed 100644
--- a/src/plugins/sched/wiki2/msg.h
+++ b/src/plugins/sched/wiki2/msg.h
@@ -63,6 +63,7 @@
 #include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <strings.h>
 #include <unistd.h>
 #include <sys/stat.h>
 #include <sys/types.h>
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c
index b9bf065eccb4289d93d0c6654272c9ee70a47d86..216bb3814b1cd3773652d670e400309537451e06 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.c
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  block_allocator.c - Assorted functions for layout of bglblocks, 
  *	 wiring, mapping for smap, etc.
- *  $Id: block_allocator.c 13934 2008-04-23 23:00:29Z da $
+ *  $Id: block_allocator.c 14322 2008-06-23 22:01:33Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -45,6 +45,7 @@
 #include <math.h>
 #include "block_allocator.h"
 #include "src/common/uid.h"
+#include "src/common/timers.h"
 
 #define DEBUG_PA
 #define BEST_COUNT_INIT 20
@@ -1495,6 +1496,107 @@ extern int reset_ba_system(bool track_down_nodes)
 	return 1;
 }
 
+/* need to call reset_all_removed_bps before starting another
+ * allocation attempt 
+ */
+extern int removable_set_bps(char *bps)
+{
+#ifdef HAVE_BG
+	int j=0, number;
+	int x;
+	int y,z;
+	int start[BA_SYSTEM_DIMENSIONS];
+	int end[BA_SYSTEM_DIMENSIONS];
+
+	if(!bps)
+		return SLURM_ERROR;
+
+	while(bps[j] != '\0') {
+		if ((bps[j] == '[' || bps[j] == ',')
+		    && (bps[j+8] == ']' || bps[j+8] == ',')
+		    && (bps[j+4] == 'x' || bps[j+4] == '-')) {
+			
+			j++;
+			number = xstrntol(bps + j, NULL, BA_SYSTEM_DIMENSIONS,
+					  HOSTLIST_BASE);
+			start[X] = number / (HOSTLIST_BASE * HOSTLIST_BASE);
+			start[Y] = (number % (HOSTLIST_BASE * HOSTLIST_BASE))
+				/ HOSTLIST_BASE;
+			start[Z] = (number % HOSTLIST_BASE);
+			j += 4;
+			number = xstrntol(bps + j, NULL, 3, HOSTLIST_BASE);
+			end[X] = number / (HOSTLIST_BASE * HOSTLIST_BASE);
+			end[Y] = (number % (HOSTLIST_BASE * HOSTLIST_BASE))
+				/ HOSTLIST_BASE;
+			end[Z] = (number % HOSTLIST_BASE);
+			j += 3;
+			for (x = start[X]; x <= end[X]; x++) {
+				for (y = start[Y]; y <= end[Y]; y++) {
+					for (z = start[Z]; z <= end[Z]; z++) {
+						if(!ba_system_ptr->grid[x][y][z]
+						   .used)
+							ba_system_ptr->
+								grid[x][y][z]
+								.used = 2;
+					}
+				}
+			}
+			
+			if(bps[j] != ',')
+				break;
+			j--;
+		} else if((bps[j] >= '0' && bps[j] <= '9')
+			  || (bps[j] >= 'A' && bps[j] <= 'Z')) {
+			
+			number = xstrntol(bps + j, NULL, BA_SYSTEM_DIMENSIONS,
+					  HOSTLIST_BASE);
+			x = number / (HOSTLIST_BASE * HOSTLIST_BASE);
+			y = (number % (HOSTLIST_BASE * HOSTLIST_BASE))
+				/ HOSTLIST_BASE;
+			z = (number % HOSTLIST_BASE);
+			j+=3;
+			if(!ba_system_ptr->grid[x][y][z].used)
+				ba_system_ptr->grid[x][y][z].used = 2;
+			
+			if(bps[j] != ',')
+				break;
+			j--;
+		}
+		j++;
+	}
+#endif
+ 	return SLURM_SUCCESS;
+}
+
+extern int reset_all_removed_bps()
+{
+	int x;
+#ifdef HAVE_BG
+	int y,z;
+#endif
+
+	for (x = 0; x < DIM_SIZE[X]; x++) {
+#ifdef HAVE_BG
+		int y, z;
+		for (y = 0; y < DIM_SIZE[Y]; y++)
+			for (z = 0; z < DIM_SIZE[Z]; z++) 
+				if(ba_system_ptr->grid[x][y][z].used == 2) {
+					ba_system_ptr->grid[x][y][z].used = 0;
+				}
+#else
+		if(ba_system_ptr->grid[x].used == 2)
+			ba_system_ptr->grid[x].used = 0;
+#endif
+	}
+	return SLURM_SUCCESS;
+}
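+
+/* Illustrative call ordering only (the variable names are hypothetical;
+ * the functions are the ones defined above and declared in
+ * block_allocator.h):
+ *
+ *	removable_set_bps(unusable_bps);	mark unusable bps (used = 2)
+ *	name = set_bg_block(results, start, geo, conn_type);
+ *	reset_all_removed_bps();		clear the used = 2 markings
+ */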
+
+/* need to call reset_all_removed_bps before starting another
+ * allocation attempt.  If possible use removable_set_bps since it is
+ * faster; it does basically the opposite of this function.  If you
+ * have to build this list yourself, though, this function is faster
+ * than calling bitmap2node_name, since that is slow.
+ */
 extern int set_all_bps_except(char *bps)
 {
 	int x;
@@ -1529,8 +1631,21 @@ extern int set_all_bps_except(char *bps)
 		y = temp;
 		temp = start % HOSTLIST_BASE;
 		z = temp;
+		if(ba_system_ptr->grid[x][y][z].state != NODE_STATE_IDLE) {
+			error("we can't use this node %c%c%c",	
+			      alpha_num[x],
+			      alpha_num[y],
+			      alpha_num[z]);
+
+			return SLURM_ERROR;
+		}
 		ba_system_ptr->grid[x][y][z].state = NODE_STATE_END;
 #else
+		if(ba_system_ptr->grid[x].state != NODE_STATE_IDLE) {
+			error("we can't use this node %d", x);
+
+			return SLURM_ERROR;
+		}
 		ba_system_ptr->grid[x].state = NODE_STATE_END;
 #endif
 		free(host);
@@ -1543,29 +1658,25 @@ extern int set_all_bps_except(char *bps)
 			for (z = 0; z < DIM_SIZE[Z]; z++) {
 				if(ba_system_ptr->grid[x][y][z].state
 				   == NODE_STATE_END) {
-					ba_system_ptr->grid[x][y][z].state = 
-						NODE_STATE_IDLE;
+					ba_system_ptr->grid[x][y][z].state
+						= NODE_STATE_IDLE;
 					ba_system_ptr->grid[x][y][z].used = 
 						false;
-				} else {
-					ba_system_ptr->grid[x][y][z].state = 
-						NODE_STATE_IDLE;
-					ba_system_ptr->grid[x][y][z].used = 
-						true;
+				} else if(!ba_system_ptr->grid[x][y][z].used) {
+					ba_system_ptr->grid[x][y][z].used = 2;
 				}
 			}
 #else
-		if(ba_system_ptr->grid[x].state != NODE_STATE_END) {
+		if(ba_system_ptr->grid[x].state == NODE_STATE_END) {
 			ba_system_ptr->grid[x].state = NODE_STATE_IDLE;
 			ba_system_ptr->grid[x].used = false;
-		} else {
-			ba_system_ptr->grid[x].state = NODE_STATE_IDLE;
-			ba_system_ptr->grid[x].used = true;
+		} else if(!ba_system_ptr->grid[x].used) {
+			ba_system_ptr->grid[x].used = 2;
 		}
 #endif
 	}
-				
-	return 1;
+
+ 	return SLURM_SUCCESS;
 }
 
 /* init_grid - set values of every grid point */
@@ -3188,7 +3299,10 @@ static bool _node_used(ba_node_t* ba_node, int *geometry)
 	
 	/* if we've used this node in another block already */
 	if (!ba_node || ba_node->used) {
-		debug3("node used");
+		debug3("node %c%c%c used", 
+		       alpha_num[ba_node->coord[X]],
+		       alpha_num[ba_node->coord[Y]],
+		       alpha_num[ba_node->coord[Z]]);
 		return true;
 	}
 	/* if we've used this nodes switches completely in another 
@@ -3688,7 +3802,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 	}
 	debug3("found - %d",found);
 	for(i=0;i<2;i++) {
-		/* info("trying port %d", ports_to_try[i]); */
+/* 		info("trying port %d", ports_to_try[i]); */
 		/* check to make sure it isn't used */
 		if(!curr_switch->int_wire[ports_to_try[i]].used) {
 			/* looking at the next node on the switch 
@@ -3697,11 +3811,15 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				ext_wire[ports_to_try[i]].node_tar;
 			port_tar = curr_switch->
 				ext_wire[ports_to_try[i]].port_tar;
-			/* info("goes to %c%c%c port %d", */
-/* 			       alpha_num[node_tar[X]], */
-/* 			       alpha_num[node_tar[Y]], */
-/* 			       alpha_num[node_tar[Z]], */
-/* 			       port_tar); */
+/* 			info("%c%c%c port %d goes to %c%c%c port %d", */
+/* 			     alpha_num[ba_node->coord[X]], */
+/* 			     alpha_num[ba_node->coord[Y]], */
+/* 			     alpha_num[ba_node->coord[Z]], */
+/* 			     ports_to_try[i], */
+/* 			     alpha_num[node_tar[X]], */
+/* 			     alpha_num[node_tar[Y]], */
+/* 			     alpha_num[node_tar[Z]], */
+/* 			     port_tar); */
 			/* check to see if we are back at the start of the
 			   block */
 			if((node_tar[X] == 
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.h b/src/plugins/select/bluegene/block_allocator/block_allocator.h
index b9b1c5a15f50cf00277d71ace11aceb01ff5788e..133654462f61a550aae9e54393f56da93aeef35e 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.h
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.h
@@ -174,7 +174,7 @@ typedef struct
  */
 typedef struct {
 	/* set if using this node in a block */
-	bool used;
+	uint16_t used;
 
 	/* coordinates */
 	int coord[BA_SYSTEM_DIMENSIONS];
@@ -343,6 +343,8 @@ extern char *set_bg_block(List results, int *start,
 			  int *geometry, int conn_type);
 
 extern int reset_ba_system(bool track_down_nodes);
+extern int removable_set_bps(char *bps);
+extern int reset_all_removed_bps();
 extern int set_all_bps_except(char *bps);
 
 extern void init_grid(node_info_msg_t *node_info_ptr);
diff --git a/src/plugins/select/bluegene/plugin/Makefile.am b/src/plugins/select/bluegene/plugin/Makefile.am
index 817ccd600c50122c42b8dd7a83dd9a1bae840cd2..fde0b65dc943164a9f92b0e59d239eee2fed685d 100644
--- a/src/plugins/select/bluegene/plugin/Makefile.am
+++ b/src/plugins/select/bluegene/plugin/Makefile.am
@@ -11,6 +11,7 @@ pkglib_LTLIBRARIES = select_bluegene.la libsched_if64.la
 
 # Blue Gene node selection plugin.
 select_bluegene_la_SOURCES = select_bluegene.c \
+				bg_boot_time.h \
 				bg_job_place.c bg_job_place.h \
 				bg_job_run.c bg_job_run.h \
 				bg_block_info.c bg_block_info.h \
diff --git a/src/plugins/select/bluegene/plugin/Makefile.in b/src/plugins/select/bluegene/plugin/Makefile.in
index 6f705c3df54b92a9e9918151f7f0448f4c79858d..e21b8b361a2eff20a88ace3cbdb67413756b81eb 100644
--- a/src/plugins/select/bluegene/plugin/Makefile.in
+++ b/src/plugins/select/bluegene/plugin/Makefile.in
@@ -310,6 +310,7 @@ pkglib_LTLIBRARIES = select_bluegene.la libsched_if64.la
 
 # Blue Gene node selection plugin.
 select_bluegene_la_SOURCES = select_bluegene.c \
+				bg_boot_time.h \
 				bg_job_place.c bg_job_place.h \
 				bg_job_run.c bg_job_run.h \
 				bg_block_info.c bg_block_info.h \
diff --git a/src/plugins/select/bluegene/plugin/bg_boot_time.h b/src/plugins/select/bluegene/plugin/bg_boot_time.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd53c74c36c8502ff1c6ee5a66fcd631691c4fe4
--- /dev/null
+++ b/src/plugins/select/bluegene/plugin/bg_boot_time.h
@@ -0,0 +1,54 @@
+/*****************************************************************************\
+ *  bg_boot_time.h - Block boot time parameters for use by slurm_prolog
+ *	and slurmctld
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BG_BOOT_TIME_H_
+#define _BG_BOOT_TIME_H_
+
+/*
+ * Total time to boot a bglblock should not exceed
+ * BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
+ * (BG_INCR_BLOCK_BOOT * base partition count).
+ * For example, if BG_FREE_PREVIOUS_BLOCK=300, BG_MIN_BLOCK_BOOT=200,
+ * BG_INCR_BLOCK_BOOT=20 and the block spans 4 base partitions,
+ * wait up to 580 seconds (300 + 200 + (20 * 4)).
+ */ 
+
+#define BG_FREE_PREVIOUS_BLOCK 300 	/* time in seconds */
+#define BG_MIN_BLOCK_BOOT  300		/* time in seconds */
+#define BG_INCR_BLOCK_BOOT 20		/* time in seconds per BP */
+
+#endif /* _BG_BOOT_TIME_H_ */
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index 3cb46be9e85fe66e0a736daca9d1c0ce697076c0..f210fec9e7e436f57b1c652cfcdee241ba968b82 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -2,7 +2,7 @@
  *  bg_job_place.c - blue gene job placement (e.g. base block selection)
  *  functions.
  *
- *  $Id: bg_job_place.c 13999 2008-05-07 22:08:58Z da $ 
+ *  $Id: bg_job_place.c 14295 2008-06-19 23:58:28Z da $ 
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -171,7 +171,7 @@ static int _bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 /* 
  * Comparator used for sorting blocks smallest to largest
  * 
- * returns: -1: rec_a >rec_b   0: rec_a == rec_b   1: rec_a < rec_b
+ * returns: -1: rec_a > rec_b   0: rec_a == rec_b   1: rec_a < rec_b
  * 
  */
 static int _bg_record_sort_aval_dec(bg_record_t* rec_a, bg_record_t* rec_b)
@@ -1107,6 +1107,17 @@ static int _find_best_block_match(List block_list,
 				 * about it now 
 				 */
 				(*found_bg_record) = list_pop(new_blocks);
+				if(!(*found_bg_record)) {
+					error("got an empty list back");
+					list_destroy(new_blocks);
+					if(bg_record) {
+						destroy_bg_record(bg_record);
+						continue;
+					} else {
+						rc = SLURM_ERROR;
+						break;
+					}
+				}
 				bit_and(slurm_block_bitmap,
 					(*found_bg_record)->bitmap);
 
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.c b/src/plugins/select/bluegene/plugin/bg_record_functions.c
index f0f78ecf5fe9d2f71800c27b16c624bf78378ac4..42b4727107ab4821a51fe7b8d05ecde2d137d67a 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.c
+++ b/src/plugins/select/bluegene/plugin/bg_record_functions.c
@@ -143,6 +143,9 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 {
 #ifdef HAVE_BG
 	int j=0, number;
+	int diff=0;
+	int largest_diff=-1;
+	int best_start[BA_SYSTEM_DIMENSIONS];
 	int start[BA_SYSTEM_DIMENSIONS];
 	int end[BA_SYSTEM_DIMENSIONS];
 	ListIterator itr;
@@ -154,6 +157,7 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 			bg_record->bg_block_list =
 				list_create(destroy_ba_node);
 		}
+		memset(&best_start, 0, sizeof(best_start));
 		bg_record->bp_count = 0;
 		if((bg_record->conn_type == SELECT_SMALL) && (!startup))
 			error("We shouldn't be here there could be some "
@@ -186,14 +190,16 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 					/ HOSTLIST_BASE;
 				end[Z] = (number % HOSTLIST_BASE);
 				j += 3;
-				if(!bg_record->bp_count) {
-					bg_record->start[X] = start[X];
-					bg_record->start[Y] = start[Y];
-					bg_record->start[Z] = start[Z];
-					debug2("start is %dx%dx%d",
-					       bg_record->start[X],
-					       bg_record->start[Y],
-					       bg_record->start[Z]);
+				diff = end[X]-start[X];
+				if(diff > largest_diff) {
+					best_start[X] = start[X];
+					best_start[Y] = start[Y];
+					best_start[Z] = start[Z];
+					debug3("start is now %dx%dx%d",
+					       best_start[X],
+					       best_start[Y],
+					       best_start[Z]);
+					largest_diff = diff;
 				}
 				bg_record->bp_count += _addto_node_list(
 					bg_record, 
@@ -217,14 +223,16 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 					/ HOSTLIST_BASE;
 				start[Z] = (number % HOSTLIST_BASE);
 				j+=3;
-				if(!bg_record->bp_count) {
-					bg_record->start[X] = start[X];
-					bg_record->start[Y] = start[Y];
-					bg_record->start[Z] = start[Z];
-					debug2("start is %dx%dx%d",
-					       bg_record->start[X],
-					       bg_record->start[Y],
-					       bg_record->start[Z]);
+				diff = 0;
+				if(diff > largest_diff) {
+					best_start[X] = start[X];
+					best_start[Y] = start[Y];
+					best_start[Z] = start[Z];
+					debug3("start is now %dx%dx%d",
+					       best_start[X],
+					       best_start[Y],
+					       best_start[Z]);
+					largest_diff = diff;
 				}
 				bg_record->bp_count += _addto_node_list(
 					bg_record, 
@@ -236,6 +244,16 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 			}
 			j++;
 		}
+		if(largest_diff == -1) 
+			fatal("No hostnames given here");
+
+		bg_record->start[X] = best_start[X];
+		bg_record->start[Y] = best_start[Y];
+		bg_record->start[Z] = best_start[Z];
+		debug2("start is %dx%dx%d",
+		       bg_record->start[X],
+		       bg_record->start[Y],
+		       bg_record->start[Z]);
 	}
 	
 	bg_record->geo[X] = 0;
diff --git a/src/plugins/select/bluegene/plugin/defined_block.c b/src/plugins/select/bluegene/plugin/defined_block.c
index 4407f7019b147e63c6f2a2995b1551b33aee8fb5..f915dfdc39c6df30896178594c7bc9f5d71795a7 100644
--- a/src/plugins/select/bluegene/plugin/defined_block.c
+++ b/src/plugins/select/bluegene/plugin/defined_block.c
@@ -100,10 +100,22 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 			   && !bg_record->full_block
 			   && bg_record->cpus_per_bp == procs_per_node) {
 				char *name = NULL;
-				if(overlapped == LAYOUT_OVERLAP) {
+
+				if(overlapped == LAYOUT_OVERLAP) 
 					reset_ba_system(false);
-					set_all_bps_except(bg_record->nodes);
-				}
+									
+				/* mark the bps that aren't in
+				 * this record as used
+				 */
+				if(set_all_bps_except(bg_record->nodes)
+				   != SLURM_SUCCESS)
+					fatal("something happened in "
+					      "the load of %s. "
+					      "Did you use smap to "
+					      "make the "
+					      "bluegene.conf file?",
+					      bg_record->bg_block_id);
+
 				for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
 					geo[i] = bg_record->geo[i];
 				debug2("adding %s %c%c%c %c%c%c",
@@ -126,6 +138,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						       "bluegene.conf file?",
 						       bg_record->bg_block_id);
 						list_iterator_destroy(itr);
+						reset_all_removed_bps();
 						slurm_mutex_unlock(
 							&block_state_mutex);
 						return SLURM_ERROR;
@@ -137,6 +150,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						bg_record->start, 
 						geo, 
 						bg_record->conn_type);
+					reset_all_removed_bps();
 					if(!name) {
 						error("I was unable to "
 						      "make the "
diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.c b/src/plugins/select/bluegene/plugin/dynamic_block.c
index 941f4fc7913036b1f3de9d681822bb0c3381ef38..c830428019b3764df73f0c8ac66fe7838f57ef14 100644
--- a/src/plugins/select/bluegene/plugin/dynamic_block.c
+++ b/src/plugins/select/bluegene/plugin/dynamic_block.c
@@ -107,12 +107,8 @@ extern List create_dynamic_block(List block_list,
 	}
 
 	if(request->avail_node_bitmap) {
-		int j=0, number;
-		int x,y,z;
-		char *nodes = NULL;
+ 		char *nodes = NULL;
 		bitstr_t *bitmap = bit_alloc(node_record_count);
-		int start[BA_SYSTEM_DIMENSIONS];
-		int end[BA_SYSTEM_DIMENSIONS];
 		
 		/* we want the bps that aren't in this partition to
 		 * mark them as used
@@ -122,72 +118,8 @@ extern List create_dynamic_block(List block_list,
 		nodes = bitmap2node_name(bitmap);
 		
 		//info("not using %s", nodes);
-		while(nodes[j] != '\0') {
-			if ((nodes[j] == '[' || nodes[j] == ',')
-			    && (nodes[j+8] == ']' || nodes[j+8] == ',')
-			    && (nodes[j+4] == 'x' || nodes[j+4] == '-')) {
+		removable_set_bps(nodes);
 
-				j++;
-				number = xstrntol(nodes + j,
-						  NULL, BA_SYSTEM_DIMENSIONS,
-						  HOSTLIST_BASE);
-				start[X] = number / 
-					(HOSTLIST_BASE * HOSTLIST_BASE);
-				start[Y] = (number % 
-					    (HOSTLIST_BASE * HOSTLIST_BASE))
-					/ HOSTLIST_BASE;
-				start[Z] = (number % HOSTLIST_BASE);
-				j += 4;
-				number = xstrntol(nodes + j,
-						NULL, 3, HOSTLIST_BASE);
-				end[X] = number /
-					(HOSTLIST_BASE * HOSTLIST_BASE);
-				end[Y] = (number 
-					  % (HOSTLIST_BASE * HOSTLIST_BASE))
-					/ HOSTLIST_BASE;
-				end[Z] = (number % HOSTLIST_BASE);
-				j += 3;
-				for (x = start[X]; x <= end[X]; x++) {
-					for (y = start[Y]; y <= end[Y]; y++) {
-						for (z = start[Z]; 
-						     z <= end[Z]; z++) {
-							ba_system_ptr->
-								grid[x]
-#ifdef HAVE_BG
-								[y][z]
-#endif
-								.used = 1;
-						}
-					}
-				}
-				
-				if(nodes[j] != ',')
-					break;
-				j--;
-			} else if((nodes[j] >= '0' && nodes[j] <= '9')
-				  || (nodes[j] >= 'A' && nodes[j] <= 'Z')) {
-				
-				number = xstrntol(nodes + j,
-						  NULL, BA_SYSTEM_DIMENSIONS,
-						  HOSTLIST_BASE);
-				x = number / (HOSTLIST_BASE * HOSTLIST_BASE);
-				y = (number % (HOSTLIST_BASE * HOSTLIST_BASE))
-					/ HOSTLIST_BASE;
-				z = (number % HOSTLIST_BASE);
-				j+=3;
-
-				ba_system_ptr->grid[x]
-#ifdef HAVE_BG
-					[y][z]
-#endif
-					.used = 1;
-
-				if(nodes[j] != ',')
-					break;
-				j--;
-			}
-			j++;
-		}
 		xfree(nodes);
 		FREE_NULL_BITMAP(bitmap);
 	}
@@ -324,6 +256,8 @@ no_list:
 	add_bg_record(new_blocks, results, &blockreq);
 
 finished:
+	reset_all_removed_bps();
+	
 	xfree(request->save_name);
 	
 	if(request->elongate_geos) {
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
index 1d17316b75c648a25ea48dfe5468db862d6f3c35..9135f35b16b40786e2655760c510b39ce04a169c 100644
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ b/src/plugins/select/bluegene/plugin/select_bluegene.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  select_bluegene.c - node selection plugin for Blue Gene system.
  * 
- *  $Id: select_bluegene.c 14091 2008-05-20 21:34:02Z da $
+ *  $Id: select_bluegene.c 14295 2008-06-19 23:58:28Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -261,6 +261,7 @@ extern int select_p_state_save(char *dir_name)
 	Buf buffer = init_buf(BUF_SIZE);
 	DEF_TIMERS;
 
+	debug("bluegene: select_p_state_save");
 	START_TIMER;
 	/* write header: time */
 	packstr(BLOCK_STATE_VERSION, buffer);
@@ -363,7 +364,10 @@ extern int select_p_state_restore(char *dir_name)
 	int blocks = 0;
 
 	debug("bluegene: select_p_state_restore");
-
+#ifdef HAVE_BG_FILES
+	debug("select_p_state_restore does nothing on a real bluegene system");
+	return SLURM_SUCCESS;
+#endif
 	if(!dir_name) {
 		debug2("Starting bluegene with clean slate");
 		return SLURM_SUCCESS;
diff --git a/src/plugins/select/bluegene/plugin/slurm_prolog.c b/src/plugins/select/bluegene/plugin/slurm_prolog.c
index dbe8a93328074e7aa174110588617de0587e4e20..af652b7879cab947ecc2e5ab908d64b8c9d1e4bd 100644
--- a/src/plugins/select/bluegene/plugin/slurm_prolog.c
+++ b/src/plugins/select/bluegene/plugin/slurm_prolog.c
@@ -54,23 +54,12 @@
 #include "src/common/hostlist.h"
 #include "src/common/node_select.h"
 #include "src/api/node_select_info.h"
+#include "src/plugins/select/bluegene/plugin/bg_boot_time.h"
 
 #define _DEBUG 0
-
-/*
- * Check the bgblock's status every POLL_SLEEP seconds. 
- * Retry for a period of 
- * MIN_FREE_PERVIOUS_BLOCK_DELAY + MIN_DELAY + (INCR_DELAY * base partition count)
- * For example if MIN_FREE_PERVIOUS_BLOCK_DELAY=300 and MIN_DELAY=600 and 
- * INCR_DELAY=20 and job_size=4 base partitions then wait up to 980 seconds
- * 300 + 600 + (20 * 4)
- */ 
 #define POLL_SLEEP 3			/* retry interval in seconds  */
-#define MIN_FREE_PERVIOUS_BLOCK_DELAY 300 /* time in seconds */
-#define MIN_DELAY  600			/* time in seconds */
-#define INCR_DELAY 20			/* time in seconds per BP */
 
-int max_delay = MIN_DELAY + MIN_FREE_PERVIOUS_BLOCK_DELAY;
+int max_delay = BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT;
 int cur_delay = 0; 
   
 enum rm_partition_state {RM_PARTITION_FREE, 
@@ -113,8 +102,8 @@ static int _wait_part_ready(uint32_t job_id)
 {
 	int is_ready = 0, i, rc;
 	
-	max_delay = MIN_FREE_PERVIOUS_BLOCK_DELAY + MIN_DELAY +
-		(INCR_DELAY * _get_job_size(job_id));
+	max_delay = BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
+		   (BG_INCR_BLOCK_BOOT * _get_job_size(job_id));
 
 #if _DEBUG
 	printf("Waiting for job %u to become ready.", job_id);
diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c
index 1a9dc8492dfc0d72a423832795501a8c1c4d3086..7a4fafe861d3c29b004cb16ee78a43e80884ad56 100644
--- a/src/plugins/select/cons_res/dist_tasks.c
+++ b/src/plugins/select/cons_res/dist_tasks.c
@@ -437,7 +437,7 @@ extern int cr_dist(struct select_cr_job *job, int cyclic,
 		     job->job_id, host_index, this_cr_node->node_ptr->name, 
 		     job->alloc_cpus[job_index]);
 		for(i=0; !cr_cpu && i<job->num_sockets[job_index];i+=2) {
-			info("cons_res: _cr_dist: %u " 
+			info("cons_res: _cr_dist: job %u " 
 			     "alloc_cores[%d][%d]=%u, [%d][%d]=%u", 
 			     job->job_id, 
 			     job_index, i, job->alloc_cores[job_index][i], 
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index acca6dc9ee4f137b7e32b3ab6d97072428b35de1..e878ffe6f33f70dbc45c731498a2de149db9ba1f 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -2,7 +2,7 @@
  *  select_cons_res.c - node selection plugin supporting consumable 
  *  resources policies.
  *
- *  $Id: select_cons_res.c 13814 2008-04-07 15:46:55Z jette $
+ *  $Id: select_cons_res.c 14297 2008-06-20 15:41:06Z jette $
  *****************************************************************************\
  *
  *  The following example below illustrates how four jobs are allocated
@@ -530,9 +530,9 @@ static uint16_t _get_task_count(struct node_cr_record *select_node_ptr,
 					    if ((cr_type == CR_SOCKET) ||
 						(cr_type == CR_SOCKET_MEMORY)) {
 						if (p_ptr->alloc_cores[i])
-							alloc_cores[i] = cores;
+							alloc_cores[i] += cores;
 					    } else {
-						alloc_cores[i] =
+						alloc_cores[i] +=
 							p_ptr->alloc_cores[i];
 					    }
 					}
@@ -573,9 +573,10 @@ static uint16_t _get_task_count(struct node_cr_record *select_node_ptr,
 				}
 				if (try_partial_idle && (alloc_row > -1)) {
 					alloc_row *= sockets;
-					for (i = 0; i < sockets; i++)
-						alloc_cores[i] =
-						p_ptr->alloc_cores[alloc_row+i];
+					for (i = 0; i < sockets; i++) {
+						alloc_cores[i] += p_ptr->
+							alloc_cores[alloc_row+i];
+					}
 				}
 			}
 		}
@@ -1808,7 +1809,7 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t * bitmap,
 				avail_cpus = _get_task_cnt(job_ptr, i,
 							   task_cnt, freq,
 							   array_size);
-				if(avail_cpus <= 0)
+				if (avail_cpus <= 0)
 					continue;
 				rem_cpus -= avail_cpus;
 				bit_set(bitmap, i);
@@ -1826,8 +1827,14 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t * bitmap,
 				avail_cpus = _get_task_cnt(job_ptr, i,
 							   task_cnt, freq,
 							   array_size);
-				if(avail_cpus <= 0)
+				if (avail_cpus <= 0)
 					continue;
+				if ((max_nodes == 1) && 
+				    (avail_cpus < rem_cpus)) {
+					/* Job can only take one more node and
+					 * this one has insufficient CPU */
+					continue;
+				}
 				rem_cpus -= avail_cpus;
 				bit_set(bitmap, i);
 				rem_nodes--;
@@ -2623,6 +2630,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		case SLURM_DIST_CYCLIC_BLOCK:
 			error_code = cr_dist(job, 0, cr_type); 
 			break;
+		case SLURM_DIST_ARBITRARY:
 		case SLURM_DIST_BLOCK:
 		case SLURM_DIST_CYCLIC:				
 		case SLURM_DIST_BLOCK_CYCLIC:
@@ -2633,12 +2641,9 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		case SLURM_DIST_PLANE:
 			error_code = cr_plane_dist(job, mc_ptr->plane_size, cr_type); 
 			break;
-		case SLURM_DIST_ARBITRARY:
 		default:
-			error_code = compute_c_b_task_dist(job);
-			if (error_code != SLURM_SUCCESS) {
-				error(" Error in compute_c_b_task_dist");
-			}
+			error("select/cons_res: invalid dist_type");
+			error_code = SLURM_ERROR;
 			break;
 		}
 	}
diff --git a/src/sacct/options.c b/src/sacct/options.c
index 330b6254884f992ab2e7481879900a90934ba5c6..432c3f86014f2c2cb9a32e5cb0129a6f815603fd 100644
--- a/src/sacct/options.c
+++ b/src/sacct/options.c
@@ -41,8 +41,6 @@
 #include "sacct.h"
 #include <time.h>
 
-void _destroy_parts(void *object);
-void _destroy_steps(void *object);
 void _help_fields_msg(void);
 void _help_msg(void);
 void _usage(void);
@@ -53,22 +51,6 @@ List selected_parts = NULL;
 List selected_steps = NULL;
 void *acct_db_conn = NULL;
 
-void _destroy_parts(void *object)
-{
-	char *part = (char *)object;
-	xfree(part);
-}
-
-void _destroy_steps(void *object)
-{
-	jobacct_selected_step_t *step = (jobacct_selected_step_t *)object;
-	if(step) {
-		xfree(step->job);
-		xfree(step->step);
-		xfree(step);
-	}
-}
-
 void _show_rec(char *f[])
 {
 	int 	i;
@@ -221,7 +203,7 @@ void _usage(void)
 
 void _init_params()
 {
-	params.opt_cluster = NULL;	/* --cluster */
+	params.opt_cluster = slurm_get_cluster_name();	/* --cluster */
 	params.opt_completion = 0;	/* --completion */
 	params.opt_dump = 0;		/* --dump */
 	params.opt_dup = -1;		/* --duplicates; +1 = explicitly set */
@@ -396,6 +378,7 @@ void parse_command_line(int argc, char **argv)
 			params.opt_completion = 1;
 			break;
 		case 'C':
+			xfree(params.opt_cluster);
 			params.opt_cluster = xstrdup(optarg);
 			break;
 		case 'd':
@@ -788,8 +771,8 @@ void parse_command_line(int argc, char **argv)
 				 (params.opt_field_list==NULL? 0 :
 				  sizeof(params.opt_field_list)) +
 				 strlen(dot)+1);
-		strcat(params.opt_field_list, dot);
-		strcat(params.opt_field_list, ",");
+		xstrcat(params.opt_field_list, dot);
+		xstrcat(params.opt_field_list, ",");
 	} 
 
 	if(long_output) {
@@ -803,8 +786,8 @@ void parse_command_line(int argc, char **argv)
 				 (params.opt_field_list==NULL? 0 :
 				  strlen(params.opt_field_list)) +
 				 strlen(dot)+1);
-		strcat(params.opt_field_list, dot);
-		strcat(params.opt_field_list, ",");
+		xstrcat(params.opt_field_list, dot);
+		xstrcat(params.opt_field_list, ",");
 	} 
 	
 	if (params.opt_field_list==NULL) {
@@ -814,9 +797,8 @@ void parse_command_line(int argc, char **argv)
 			dot = DEFAULT_COMP_FIELDS;
 		else
 			dot = DEFAULT_FIELDS;
-		params.opt_field_list = xmalloc(strlen(dot)+1);
-		strcpy(params.opt_field_list, dot); 
-		strcat(params.opt_field_list, ",");
+		params.opt_field_list = xstrdup(dot);
+		xstrcat(params.opt_field_list, ",");
 	}
 
 	start = params.opt_field_list;
@@ -1125,8 +1107,8 @@ void do_list(void)
 		do_jobsteps = 0;
 	itr = list_iterator_create(jobs);
 	while((job = list_next(itr))) {
-		/* FIX ME: this should be handled while getting the
-		   data, not afterwards.
+		/* This is really handled when we get the data, except
+		   for the filetxt plugin, so keep it here.
 		*/
 		if (params.opt_uid >= 0 && (job->uid != params.opt_uid))
 			continue;
@@ -1212,8 +1194,8 @@ void do_stat()
 void sacct_init()
 {
 	int i=0;
-	selected_parts = list_create(_destroy_parts);
-	selected_steps = list_create(_destroy_steps);
+	selected_parts = list_create(slurm_destroy_char);
+	selected_steps = list_create(destroy_jobacct_selected_step);
 	for(i=0; i<STATE_COUNT; i++)
 		selected_state[i] = 0;
 }
diff --git a/src/sacct/print.c b/src/sacct/print.c
index 61a70f8e4f8ca7284923fc84ef62475803c8c533..91c1231b18a2028a5ce4a09f22c55e8878eb188f 100644
--- a/src/sacct/print.c
+++ b/src/sacct/print.c
@@ -617,13 +617,13 @@ void print_rss(type_t type, void *object)
 		nodes = job->nodes;
 		pos = sacct.min_cpu_id.nodeid;				 
 		convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1),
-				 UNIT_NONE);
+				 UNIT_KILO);
 
 		if(job->track_steps)
 			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
 		else {
 			convert_num_unit((float)sacct.ave_rss, 
-					 buf2, sizeof(buf2), UNIT_NONE);
+					 buf2, sizeof(buf2), UNIT_KILO);
 			find_hostname(pos, nodes, buf3);
 			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
 				 buf1,
@@ -638,9 +638,9 @@ void print_rss(type_t type, void *object)
 		nodes = step->nodes;
 		pos = sacct.min_cpu_id.nodeid;				 
 		convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1),
-				 UNIT_NONE);
+				 UNIT_KILO);
 		convert_num_unit((float)sacct.ave_rss, buf2, sizeof(buf2),
-				 UNIT_NONE);
+				 UNIT_KILO);
 		find_hostname(pos, nodes, buf3);
 		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
 			 buf1,
@@ -970,12 +970,12 @@ void print_vsize(type_t type, void *object)
 		nodes = job->nodes;
 		pos = sacct.min_cpu_id.nodeid;				 
 		convert_num_unit((float)sacct.max_vsize, 
-				 buf1, sizeof(buf1),UNIT_NONE);
+				 buf1, sizeof(buf1), UNIT_KILO);
 		if(job->track_steps)
 			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
 		else {
 			convert_num_unit((float)sacct.ave_vsize,
-					 buf2, sizeof(buf2), UNIT_NONE);
+					 buf2, sizeof(buf2), UNIT_KILO);
 			find_hostname(pos, nodes, buf3);
 			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
 				 buf1,
@@ -990,9 +990,9 @@ void print_vsize(type_t type, void *object)
 		nodes = step->nodes;
 		pos = sacct.min_cpu_id.nodeid;				 
 		convert_num_unit((float)sacct.max_vsize, buf1, sizeof(buf1), 
-				 UNIT_NONE);
+				 UNIT_KILO);
 		convert_num_unit((float)sacct.ave_vsize, buf2, sizeof(buf2),
-				 UNIT_NONE);
+				 UNIT_KILO);
 		find_hostname(pos, nodes, buf3);
 		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
 			 buf1,
diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c
index 8206aaf844e7d0a6d5d594c90c66b734edb8415f..79c6981c3bca7000053ea8a84c02f3b71ad63fea 100644
--- a/src/sacct/sacct.c
+++ b/src/sacct/sacct.c
@@ -308,7 +308,7 @@ int main(int argc, char **argv)
 		fprintf(stderr,
 			"This functionality has been replaced with 'sstat' "
 			"in the future please make note this will "
-			"not be supported.");
+			"not be supported.\n");
 		
 		if (params.opt_header) 	/* give them something to look */
 			_print_header();/* at while we think...        */
diff --git a/src/sacctmgr/Makefile.am b/src/sacctmgr/Makefile.am
index bf354ad3ed90f82e366284f815ed22edd2634a41..c495e0e102fee1459c4ac06cc09f50d237468c98 100644
--- a/src/sacctmgr/Makefile.am
+++ b/src/sacctmgr/Makefile.am
@@ -11,7 +11,6 @@ sacctmgr_SOURCES =	\
 	association_functions.c	\
 	cluster_functions.c	\
 	common.c                \
-	print.c	print.h	        \
 	sacctmgr.c sacctmgr.h	\
 	user_functions.c	
 
diff --git a/src/sacctmgr/Makefile.in b/src/sacctmgr/Makefile.in
index 585445fa5aaf546dda8cac2f4e11f32e3d4fef5a..072c2a165387ce8d47bde9c2bc92bd1bf8b14345 100644
--- a/src/sacctmgr/Makefile.in
+++ b/src/sacctmgr/Makefile.in
@@ -72,8 +72,7 @@ binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
 am_sacctmgr_OBJECTS = account_functions.$(OBJEXT) \
 	association_functions.$(OBJEXT) cluster_functions.$(OBJEXT) \
-	common.$(OBJEXT) print.$(OBJEXT) sacctmgr.$(OBJEXT) \
-	user_functions.$(OBJEXT)
+	common.$(OBJEXT) sacctmgr.$(OBJEXT) user_functions.$(OBJEXT)
 sacctmgr_OBJECTS = $(am_sacctmgr_OBJECTS)
 am__DEPENDENCIES_1 =
 sacctmgr_DEPENDENCIES = $(top_builddir)/src/common/libcommon.o \
@@ -273,7 +272,6 @@ sacctmgr_SOURCES = \
 	association_functions.c	\
 	cluster_functions.c	\
 	common.c                \
-	print.c	print.h	        \
 	sacctmgr.c sacctmgr.h	\
 	user_functions.c	
 
@@ -358,7 +356,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/association_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cluster_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sacctmgr.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/user_functions.Po@am__quote@
 
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index ed0e3d916b9907c650fca3756c75f1ad71137b08..876d82beb22d5014f91619c76fa572f9fba095ce 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -37,7 +37,6 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
-#include "src/sacctmgr/print.h"
 
 static int _set_cond(int *start, int argc, char *argv[],
 		     acct_account_cond_t *acct_cond,
@@ -55,6 +54,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			break;
 		} else if (strncasecmp (argv[i], "WithAssoc", 4) == 0) {
 			acct_cond->with_assocs = 1;
+		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
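+			/* silently skip an optional "where" keyword */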
+			continue;
 		} else if(!end) {
 			addto_char_list(acct_cond->acct_list, argv[i]);
 			addto_char_list(acct_cond->assoc_cond->acct_list,
@@ -83,7 +84,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			u_set = 1;
 		} else if (strncasecmp (argv[i], "Parent", 1) == 0) {
 			acct_cond->assoc_cond->parent_acct =
-				xstrdup(argv[i]+end);
+				strip_quotes(argv[i]+end, NULL);
 			a_set = 1;
 		} else if (strncasecmp (argv[i], "QosLevel", 1) == 0) {
 			acct_cond->qos = str_2_acct_qos(argv[i]+end);
@@ -118,11 +119,13 @@ static int _set_rec(int *start, int argc, char *argv[],
 		if (strncasecmp (argv[i], "Where", 5) == 0) {
 			i--;
 			break;
+		} else if(!end && !strncasecmp(argv[i], "set", 3)) {
+			continue;
 		} else if(!end) {
 			printf(" Bad format on %s: End your option with "
 			       "an '=' sign\n", argv[i]);
 		} else if (strncasecmp (argv[i], "Description", 1) == 0) {
-			acct->description = xstrdup(argv[i]+end);
+			acct->description = strip_quotes(argv[i]+end, NULL);
 			u_set = 1;
 		} else if (strncasecmp (argv[i], "FairShare", 1) == 0) {
 			if (get_uint(argv[i]+end, &assoc->fairshare, 
@@ -151,10 +154,10 @@ static int _set_rec(int *start, int argc, char *argv[],
 					argv[i]);
 			}
 		} else if (strncasecmp (argv[i], "Organization", 1) == 0) {
-			acct->organization = xstrdup(argv[i]+end);
+			acct->organization = strip_quotes(argv[i]+end, NULL);
 			u_set = 1;
 		} else if (strncasecmp (argv[i], "Parent", 1) == 0) {
-			assoc->parent_acct = xstrdup(argv[i]+end);
+			assoc->parent_acct = strip_quotes(argv[i]+end, NULL);
 			a_set = 1;
 		} else if (strncasecmp (argv[i], "QosLevel=", 1) == 0) {
 			acct->qos = str_2_acct_qos(argv[i]+end);
@@ -275,7 +278,7 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		} else if (strncasecmp (argv[i], "Cluster", 1) == 0) {
 			addto_char_list(cluster_list, argv[i]+end);
 		} else if (strncasecmp (argv[i], "Description", 1) == 0) {
-			description = xstrdup(argv[i]+end);
+			description = strip_quotes(argv[i]+end, NULL);
 		} else if (strncasecmp (argv[i], "FairShare", 1) == 0) {
 			if (get_uint(argv[i]+end, &fairshare, 
 			    "FairShare") == SLURM_SUCCESS)
@@ -304,9 +307,9 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		} else if (strncasecmp (argv[i], "Names", 1) == 0) {
 			addto_char_list(name_list, argv[i]+end);
 		} else if (strncasecmp (argv[i], "Organization", 1) == 0) {
-			organization = xstrdup(argv[i]+end);
+			organization = strip_quotes(argv[i]+end, NULL);
 		} else if (strncasecmp (argv[i], "Parent", 1) == 0) {
-			parent = xstrdup(argv[i]+end);
+			parent = strip_quotes(argv[i]+end, NULL);
 		} else if (strncasecmp (argv[i], "QosLevel", 1) == 0) {
 			qos = str_2_acct_qos(argv[i]+end);
 		} else {
@@ -686,6 +689,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 	destroy_acct_account_cond(acct_cond);
 
 	if(!acct_list) {
+		printf(" Problem with query.\n");
 		list_destroy(format_list);
 		return SLURM_ERROR;
 	}
@@ -698,72 +702,72 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 			field->type = PRINT_ACCOUNT;
 			field->name = xstrdup("Account");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Cluster", object, 1)) {
 			field->type = PRINT_CLUSTER;
 			field->name = xstrdup("Cluster");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Description", object, 1)) {
 			field->type = PRINT_DESC;
 			field->name = xstrdup("Descr");
 			field->len = 20;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("FairShare", object, 1)) {
 			field->type = PRINT_FAIRSHARE;
 			field->name = xstrdup("FairShare");
 			field->len = 9;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("ID", object, 1)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
 			field->len = 6;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxCPUSecs", object, 4)) {
 			field->type = PRINT_MAXC;
 			field->name = xstrdup("MaxCPUSecs");
 			field->len = 11;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
 			field->name = xstrdup("MaxJobs");
 			field->len = 7;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxNodes", object, 4)) {
 			field->type = PRINT_MAXN;
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
-			field->print_routine = print_time;
+			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("Organization", object, 1)) {
 			field->type = PRINT_ORG;
 			field->name = xstrdup("Org");
 			field->len = 20;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("QOS", object, 1)) {
 			field->type = PRINT_QOS;
 			field->name = xstrdup("QOS");
 			field->len = 9;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("ParentID", object, 7)) {
 			field->type = PRINT_PID;
 			field->name = xstrdup("Par ID");
 			field->len = 6;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("ParentName", object, 7)) {
 			field->type = PRINT_PNAME;
 			field->name = xstrdup("Par Name");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("User", object, 1)) {
 			field->type = PRINT_USER;
 			field->name = xstrdup("User");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else {
 			printf("Unknown field '%s'\n", object);
 			xfree(field);
@@ -775,7 +779,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 
 	itr = list_iterator_create(acct_list);
 	itr2 = list_iterator_create(print_fields_list);
-	print_header(print_fields_list);
+	print_fields_header(print_fields_list);
 
 	while((acct = list_next(itr))) {
 		if(acct->assoc_list && list_count(acct->assoc_list)) {
@@ -966,8 +970,6 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 		}
 	}
 
-	printf("\n");
-
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
 	list_destroy(acct_list);
diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c
index cec4ace3a0b52db86a156f640d475153ade21ffd..c7ae5a630f35df8d653cc66b8d8d5cf2c7a79495 100644
--- a/src/sacctmgr/association_functions.c
+++ b/src/sacctmgr/association_functions.c
@@ -37,7 +37,6 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
-#include "src/sacctmgr/print.h"
 
 static int _set_cond(int *start, int argc, char *argv[],
 		     acct_association_cond_t *association_cond,
@@ -48,7 +47,9 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
-		if(!end) {
+		if(!end && !strncasecmp(argv[i], "where", 5)) {
+			continue;
+		} else if(!end) {
 			addto_char_list(association_cond->id_list, argv[i]);
 			set = 1;
 		} else if (strncasecmp (argv[i], "Id", 1) == 0) {
@@ -235,6 +236,7 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 	destroy_acct_association_cond(assoc_cond);
 	
 	if(!assoc_list) {
+		printf(" Problem with query.\n");
 		list_destroy(format_list);
 		return SLURM_ERROR;
 	}
@@ -250,62 +252,62 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			field->type = PRINT_ACCOUNT;
 			field->name = xstrdup("Account");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Cluster", object, 1)) {
 			field->type = PRINT_CLUSTER;
 			field->name = xstrdup("Cluster");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("FairShare", object, 1)) {
 			field->type = PRINT_FAIRSHARE;
 			field->name = xstrdup("FairShare");
 			field->len = 9;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("ID", object, 1)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
 			field->len = 6;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxCPUSecs", object, 4)) {
 			field->type = PRINT_MAXC;
 			field->name = xstrdup("MaxCPUSecs");
 			field->len = 11;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
 			field->name = xstrdup("MaxJobs");
 			field->len = 7;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxNodes", object, 4)) {
 			field->type = PRINT_MAXN;
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
-			field->print_routine = print_time;
+			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("ParentID", object, 7)) {
 			field->type = PRINT_PID;
 			field->name = xstrdup("Par ID");
 			field->len = 6;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("ParentName", object, 7)) {
 			field->type = PRINT_PNAME;
 			field->name = xstrdup("Par Name");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Partition", object, 4)) {
 			field->type = PRINT_PART;
 			field->name = xstrdup("Partition");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("User", object, 1)) {
 			field->type = PRINT_USER;
 			field->name = xstrdup("User");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else {
 			printf("Unknown field '%s'\n", object);
 			xfree(field);
@@ -317,7 +319,7 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 
 	itr = list_iterator_create(assoc_list);
 	itr2 = list_iterator_create(print_fields_list);
-	print_header(print_fields_list);
+	print_fields_header(print_fields_list);
 
 	while((assoc = list_next(itr))) {
 		while((field = list_next(itr2))) {
@@ -380,8 +382,6 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 		printf("\n");
 	}
 
-	printf("\n");
-
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
 	list_destroy(assoc_list);
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index 8a099cbce44662b9746c4c6c2e087614e9dde3fa..f3baedd52d83a1003e1b8d9e1daa94c7b0b2bfc0 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -38,7 +38,6 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
-#include "src/sacctmgr/print.h"
 
 static int _set_cond(int *start, int argc, char *argv[],
 		     List cluster_list,
@@ -53,6 +52,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 		if (strncasecmp (argv[i], "Set", 3) == 0) {
 			i--;
 			break;
+		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
+			continue;
 		} else if(!end) {
 			addto_char_list(cluster_list, argv[i]);
 			set = 1;
@@ -85,6 +86,8 @@ static int _set_rec(int *start, int argc, char *argv[],
 		if (strncasecmp (argv[i], "Where", 5) == 0) {
 			i--;
 			break;
+		} else if(!end && !strncasecmp(argv[i], "set", 3)) {
+			continue;
 		} else if(!end) {
 			printf(" Bad format on %s: End your option with "
 			       "an '=' sign\n", argv[i]);			
@@ -333,6 +336,7 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 	destroy_acct_cluster_cond(cluster_cond);
 	
 	if(!cluster_list) {
+		printf(" Problem with query.\n");
 		list_destroy(format_list);
 		return SLURM_ERROR;
 	}
@@ -351,42 +355,42 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 			field->type = PRINT_CLUSTER;
 			field->name = xstrdup("Cluster");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("ControlHost", object, 8)) {
 			field->type = PRINT_CHOST;
 			field->name = xstrdup("Control Host");
 			field->len = 12;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("ControlPort", object, 8)) {
 			field->type = PRINT_CPORT;
 			field->name = xstrdup("Control Port");
 			field->len = 12;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("FairShare", object, 1)) {
 			field->type = PRINT_FAIRSHARE;
 			field->name = xstrdup("FairShare");
 			field->len = 9;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxCPUSecs", object, 4)) {
 			field->type = PRINT_MAXC;
 			field->name = xstrdup("MaxCPUSecs");
 			field->len = 11;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
 			field->name = xstrdup("MaxJobs");
 			field->len = 7;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxNodes", object, 4)) {
 			field->type = PRINT_MAXN;
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
-			field->print_routine = print_time;
+			field->print_routine = print_fields_time;
 		} else {
 			printf("Unknown field '%s'\n", object);
 			xfree(field);
@@ -398,7 +402,7 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 
 	itr = list_iterator_create(cluster_list);
 	itr2 = list_iterator_create(print_fields_list);
-	print_header(print_fields_list);
+	print_fields_header(print_fields_list);
 
 	while((cluster = list_next(itr))) {
 		while((field = list_next(itr2))) {
@@ -449,8 +453,6 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 		printf("\n");
 	}
 
-	printf("\n");
-
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
 	list_destroy(cluster_list);
diff --git a/src/sacctmgr/common.c b/src/sacctmgr/common.c
index 36b2041259d0fcd1afd26f4688d82613b7a11ee7..ee4033af8ed6807a4d6f12022ed1c5f78149e929 100644
--- a/src/sacctmgr/common.c
+++ b/src/sacctmgr/common.c
@@ -174,68 +174,6 @@ extern void addto_char_list(List char_list, char *names)
 	list_iterator_destroy(itr);
 } 
 
-extern void destroy_sacctmgr_action(void *object)
-{
-	sacctmgr_action_t *action = (sacctmgr_action_t *)object;
-	
-	if(action) {
-		if(action->list)
-			list_destroy(action->list);
-			
-		switch(action->type) {
-		case SACCTMGR_ACTION_NOTSET:
-		case SACCTMGR_USER_CREATE:
-		case SACCTMGR_ACCOUNT_CREATE:
-		case SACCTMGR_CLUSTER_CREATE:
-		case SACCTMGR_ASSOCIATION_CREATE:
-			/* These only have a list so there isn't
-			 * anything else to free 
-			 */
-			break;
-		case SACCTMGR_USER_MODIFY:
-			destroy_acct_user_rec(action->rec);
-			destroy_acct_user_cond(action->cond);
-			break;
-		case SACCTMGR_USER_DELETE:
-			destroy_acct_user_cond(action->cond);
-			break;
-		case SACCTMGR_ACCOUNT_MODIFY:
-			destroy_acct_account_rec(action->rec);
-			destroy_acct_account_cond(action->cond);
-			break;
-		case SACCTMGR_ACCOUNT_DELETE:
-			destroy_acct_account_cond(action->cond);
-			break;
-		case SACCTMGR_CLUSTER_MODIFY:
-			destroy_acct_cluster_rec(action->rec);
-			destroy_acct_cluster_cond(action->cond);
-			break;
-		case SACCTMGR_CLUSTER_DELETE:
-			destroy_acct_cluster_cond(action->cond);
-			break;
-		case SACCTMGR_ASSOCIATION_MODIFY:
-			destroy_acct_association_rec(action->rec);
-			destroy_acct_association_cond(action->cond);
-			break;
-		case SACCTMGR_ASSOCIATION_DELETE:
-			destroy_acct_association_cond(action->cond);
-			break;
-		case SACCTMGR_COORD_CREATE:
-			xfree(action->rec);
-			destroy_acct_user_cond(action->cond);
-			break;
-		case SACCTMGR_COORD_DELETE:
-			xfree(action->rec);
-			destroy_acct_user_cond(action->cond);
-			break;	
-		default:
-			error("unknown action %d", action->type);
-			break;
-		}
-		xfree(action);
-	}
-}
-
 extern int notice_thread_init()
 {
 	pthread_attr_t attr;
@@ -603,3 +541,4 @@ extern int get_uint(char *in_value, uint32_t *out_value, char *type)
 		*out_value = (uint32_t) num;
 	return SLURM_SUCCESS;
 }
+
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index c56e76ddec202171cf7f422f58a166b0ca8a738e..b19eebb2fb04b10441706ea2ecc73d8864a2917d 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -39,7 +39,6 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
-#include "src/sacctmgr/print.h"
 #include "src/common/xsignal.h"
 
 #define OPT_LONG_HIDE   0x102
@@ -143,10 +142,10 @@ main (int argc, char *argv[])
 			one_liner = 1;
 			break;
 		case (int)'n':
-			have_header = 0;
+			print_fields_have_header = 0;
 			break;
 		case (int)'p':
-			parsable_print = 1;
+			print_fields_parsable_print = 1;
 			break;
 		case (int)'q':
 			quiet_flag = 1;
@@ -197,7 +196,6 @@ main (int argc, char *argv[])
 
 	acct_storage_g_close_connection(&db_conn);
 	slurm_acct_storage_fini();
-	printf("\n");
 	exit(exit_code);
 }
 
@@ -363,6 +361,7 @@ _process_command (int argc, char *argv[])
 		}
 		quiet_flag = 1;
 	} else if ((strncasecmp (argv[0], "exit", 4) == 0) ||
+		   (strncasecmp (argv[0], "\\q", 2) == 0) ||
 		   (strncasecmp (argv[0], "quit", 4) == 0)) {
 		if (argc > 1) {
 			exit_code = 1;
@@ -464,15 +463,17 @@ static void _add_it (int argc, char *argv[])
 	/* First identify the entity to add */
 	if (strncasecmp (argv[0], "User", 1) == 0) {
 		error_code = sacctmgr_add_user((argc - 1), &argv[1]);
+	} else if (strncasecmp (argv[0], "Cluster", 2) == 0) {
+		error_code = sacctmgr_add_cluster((argc - 1), &argv[1]);
+	} else if (strncasecmp (argv[0], "Coordinator", 2) == 0) {
+		error_code = sacctmgr_add_coord((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "Account", 1) == 0) {
 		error_code = sacctmgr_add_account((argc - 1), &argv[1]);
-	} else if (strncasecmp (argv[0], "Cluster", 1) == 0) {
-		error_code = sacctmgr_add_cluster((argc - 1), &argv[1]);
 	} else {
 		exit_code = 1;
 		fprintf(stderr, "No valid entity in add command\n");
 		fprintf(stderr, "Input line must include, ");
-		fprintf(stderr, "\"User\", \"Account\", ");
+		fprintf(stderr, "\"User\", \"Account\", \"Coordinator\", ");
 		fprintf(stderr, "or \"Cluster\"\n");
 	}
 	
@@ -556,13 +557,15 @@ static void _delete_it (int argc, char *argv[])
 		error_code = sacctmgr_delete_user((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "Account", 1) == 0) {
 		error_code = sacctmgr_delete_account((argc - 1), &argv[1]);
-	} else if (strncasecmp (argv[0], "Cluster", 1) == 0) {
+	} else if (strncasecmp (argv[0], "Cluster", 2) == 0) {
 		error_code = sacctmgr_delete_cluster((argc - 1), &argv[1]);
+	} else if (strncasecmp (argv[0], "Coordinator", 2) == 0) {
+		error_code = sacctmgr_delete_coord((argc - 1), &argv[1]);
 	} else {
 		exit_code = 1;
 		fprintf(stderr, "No valid entity in delete command\n");
 		fprintf(stderr, "Input line must include ");
-		fprintf(stderr, "\"User\", \"Account\", ");
+		fprintf(stderr, "\"User\", \"Account\", \"Coordinator\", ");
 		fprintf(stderr, "or \"Cluster\"\n");
 	}
 	
@@ -1168,55 +1171,55 @@ static void _load_file (int argc, char *argv[])
 
 	admin_field.name = "Admin";
 	admin_field.len = 9;
-	admin_field.print_routine = print_str;
+	admin_field.print_routine = print_fields_str;
 
 	name_field.name = "Name";
 	name_field.len = 10;
-	name_field.print_routine = print_str;
+	name_field.print_routine = print_fields_str;
 		
 	parent_field.name = "Parent";
 	parent_field.len = 10;
-	parent_field.print_routine = print_str;
+	parent_field.print_routine = print_fields_str;
 	
 	acct_field.name = "Account";
 	acct_field.len = 10;
-	acct_field.print_routine = print_str;
+	acct_field.print_routine = print_fields_str;
 	
 	dacct_field.name = "Def Acct";
 	dacct_field.len = 10;
-	dacct_field.print_routine = print_str;
+	dacct_field.print_routine = print_fields_str;
 	
 	desc_field.name = "Descr";
 	desc_field.len = 10;
-	desc_field.print_routine = print_str;
+	desc_field.print_routine = print_fields_str;
 	
 	org_field.name = "Org";
 	org_field.len = 10;
-	org_field.print_routine = print_str;
+	org_field.print_routine = print_fields_str;
 	
 	qos_field.name = "QOS";
 	qos_field.len = 9;
-	qos_field.print_routine = print_str;
+	qos_field.print_routine = print_fields_str;
 	
 	fs_field.name = "FairShare";
 	fs_field.len = 10;
-	fs_field.print_routine = print_uint;
+	fs_field.print_routine = print_fields_uint;
 
 	mc_field.name = "MaxCPUSecs";
 	mc_field.len = 10;
-	mc_field.print_routine = print_uint;
+	mc_field.print_routine = print_fields_uint;
 
 	mj_field.name = "MaxJobs";
 	mj_field.len = 7;
-	mj_field.print_routine = print_uint;
+	mj_field.print_routine = print_fields_uint;
 
 	mn_field.name = "MaxNodes";
 	mn_field.len = 8;
-	mn_field.print_routine = print_uint;
+	mn_field.print_routine = print_fields_uint;
 
 	mw_field.name = "MaxWall";
 	mw_field.len = 7;
-	mw_field.print_routine = print_time;
+	mw_field.print_routine = print_fields_time;
 		
 	START_TIMER;
 	if(rc == SLURM_SUCCESS && list_count(acct_list)) {
@@ -1228,17 +1231,17 @@ static void _load_file (int argc, char *argv[])
 		list_append(print_fields_list, &org_field);
 		list_append(print_fields_list, &qos_field);
 
-		print_header(print_fields_list);
+		print_fields_header(print_fields_list);
 
 		itr = list_iterator_create(acct_list);
 		while((acct = list_next(itr))) {
-			print_str(SLURM_PRINT_VALUE, &name_field, 
+			print_fields_str(SLURM_PRINT_VALUE, &name_field, 
 				  acct->name);
-			print_str(SLURM_PRINT_VALUE, &desc_field, 
+			print_fields_str(SLURM_PRINT_VALUE, &desc_field, 
 				  acct->description);
-			print_str(SLURM_PRINT_VALUE, &org_field, 
+			print_fields_str(SLURM_PRINT_VALUE, &org_field, 
 				  acct->organization);
-			print_str(SLURM_PRINT_VALUE, &qos_field, 
+			print_fields_str(SLURM_PRINT_VALUE, &qos_field, 
 				  acct_qos_str(acct->qos));
 			printf("\n");
 		}
@@ -1261,23 +1264,24 @@ static void _load_file (int argc, char *argv[])
 		list_append(print_fields_list, &mn_field);
 		list_append(print_fields_list, &mw_field);
 
-		print_header(print_fields_list);
+		print_fields_header(print_fields_list);
 		
 		itr = list_iterator_create(acct_assoc_list);
 		while((assoc = list_next(itr))) {
-			print_str(SLURM_PRINT_VALUE, &name_field, assoc->acct);
-			print_str(SLURM_PRINT_VALUE, &parent_field, 
-				  assoc->parent_acct);
-			print_uint(SLURM_PRINT_VALUE, &fs_field, 
-				   assoc->fairshare);
-			print_uint(SLURM_PRINT_VALUE, &mc_field, 
-				   assoc->max_cpu_secs_per_job);
-			print_uint(SLURM_PRINT_VALUE, &mj_field, 
-				   assoc->max_jobs);
-			print_uint(SLURM_PRINT_VALUE, &mn_field, 
-				   assoc->max_nodes_per_job);
-			print_time(SLURM_PRINT_VALUE, &mw_field,
-				   assoc->max_wall_duration_per_job);
+			print_fields_str(SLURM_PRINT_VALUE, &name_field,
+					 assoc->acct);
+			print_fields_str(SLURM_PRINT_VALUE, &parent_field, 
+					 assoc->parent_acct);
+			print_fields_uint(SLURM_PRINT_VALUE, &fs_field, 
+					  assoc->fairshare);
+			print_fields_uint(SLURM_PRINT_VALUE, &mc_field, 
+					  assoc->max_cpu_secs_per_job);
+			print_fields_uint(SLURM_PRINT_VALUE, &mj_field, 
+					  assoc->max_jobs);
+			print_fields_uint(SLURM_PRINT_VALUE, &mn_field, 
+					  assoc->max_nodes_per_job);
+			print_fields_time(SLURM_PRINT_VALUE, &mw_field,
+					  assoc->max_wall_duration_per_job);
 			printf("\n");
 		}
 		list_iterator_destroy(itr);
@@ -1297,17 +1301,19 @@ static void _load_file (int argc, char *argv[])
 		list_append(print_fields_list, &qos_field);
 		list_append(print_fields_list, &admin_field);
 
-		print_header(print_fields_list);
+		print_fields_header(print_fields_list);
 
 		itr = list_iterator_create(user_list);
 		while((acct = list_next(itr))) {
-			print_str(SLURM_PRINT_VALUE, &name_field, user->name);
-			print_str(SLURM_PRINT_VALUE, &dacct_field, 
-				  user->default_acct);
-			print_str(SLURM_PRINT_VALUE, &qos_field, 
-				  acct_qos_str(user->qos));
-			print_str(SLURM_PRINT_VALUE, &admin_field,
-				  acct_admin_level_str(user->admin_level));
+			print_fields_str(SLURM_PRINT_VALUE, &name_field,
+					 user->name);
+			print_fields_str(SLURM_PRINT_VALUE, &dacct_field, 
+					 user->default_acct);
+			print_fields_str(SLURM_PRINT_VALUE, &qos_field, 
+					 acct_qos_str(user->qos));
+			print_fields_str(SLURM_PRINT_VALUE, &admin_field,
+					 acct_admin_level_str(
+						 user->admin_level));
 			printf("\n");
 		}
 		list_iterator_destroy(itr);
@@ -1330,21 +1336,23 @@ static void _load_file (int argc, char *argv[])
 		list_append(print_fields_list, &mn_field);
 		list_append(print_fields_list, &mw_field);
 
-		print_header(print_fields_list);
+		print_fields_header(print_fields_list);
 		
 		itr = list_iterator_create(user_assoc_list);
 		while((assoc = list_next(itr))) {
-			print_str(SLURM_PRINT_VALUE, &name_field, assoc->user);
-			print_str(SLURM_PRINT_VALUE, &acct_field, assoc->acct);
-			print_uint(SLURM_PRINT_VALUE, &fs_field, 
+			print_fields_str(SLURM_PRINT_VALUE, &name_field,
+					 assoc->user);
+			print_fields_str(SLURM_PRINT_VALUE, &acct_field,
+					 assoc->acct);
+			print_fields_uint(SLURM_PRINT_VALUE, &fs_field, 
 				   assoc->fairshare);
-			print_uint(SLURM_PRINT_VALUE, &mc_field, 
+			print_fields_uint(SLURM_PRINT_VALUE, &mc_field, 
 				   assoc->max_cpu_secs_per_job);
-			print_uint(SLURM_PRINT_VALUE, &mj_field, 
+			print_fields_uint(SLURM_PRINT_VALUE, &mj_field, 
 				   assoc->max_jobs);
-			print_uint(SLURM_PRINT_VALUE, &mn_field, 
+			print_fields_uint(SLURM_PRINT_VALUE, &mn_field, 
 				   assoc->max_nodes_per_job);
-			print_uint(SLURM_PRINT_VALUE, &mw_field,
+			print_fields_uint(SLURM_PRINT_VALUE, &mw_field,
 				   assoc->max_wall_duration_per_job);
 			printf("\n");
 		}
diff --git a/src/sacctmgr/sacctmgr.h b/src/sacctmgr/sacctmgr.h
index 00b665f568c23b17a77549549f9b6f187a3b9acd..df5f380b0de20975d75462ee3b74100a3e449e0c 100644
--- a/src/sacctmgr/sacctmgr.h
+++ b/src/sacctmgr/sacctmgr.h
@@ -81,37 +81,11 @@
 #include "src/common/parse_time.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/xstring.h"
+#include "src/common/print_fields.h"
 
 #define CKPT_WAIT	10
 #define	MAX_INPUT_FIELDS 128
 
-typedef enum {
-	SACCTMGR_ACTION_NOTSET,
-	SACCTMGR_USER_CREATE,
-	SACCTMGR_USER_MODIFY,
-	SACCTMGR_USER_DELETE,
-	SACCTMGR_ACCOUNT_CREATE,
-	SACCTMGR_ACCOUNT_MODIFY,
-	SACCTMGR_ACCOUNT_DELETE,
-	SACCTMGR_CLUSTER_CREATE,
-	SACCTMGR_CLUSTER_MODIFY,
-	SACCTMGR_CLUSTER_DELETE,
-	SACCTMGR_ASSOCIATION_CREATE,
-	SACCTMGR_ASSOCIATION_MODIFY,
-	SACCTMGR_ASSOCIATION_DELETE,
-	SACCTMGR_COORD_CREATE,
-	SACCTMGR_COORD_DELETE
-} sacctmgr_action_type_t;
-
-typedef struct {
-	sacctmgr_action_type_t type;
-	void *cond; /* if the action has a condition typecast to an
-		     * account_*_cond_t * */
-	void *rec; /* if the action has a record typecast to an
-		    * account_*_rec_t * or char * for type COORD */
-	List list; /* if the action has a list */
-} sacctmgr_action_t;
-
 extern char *command_name;
 extern int exit_code;	/* sacctmgr's exit code, =1 on any error at any time */
 extern int exit_flag;	/* program to terminate if =1 */
@@ -127,6 +101,7 @@ extern int sacctmgr_add_association(int argc, char *argv[]);
 extern int sacctmgr_add_user(int argc, char *argv[]);
 extern int sacctmgr_add_account(int argc, char *argv[]);
 extern int sacctmgr_add_cluster(int argc, char *argv[]);
+extern int sacctmgr_add_coord(int argc, char *argv[]);
 
 extern int sacctmgr_list_association(int argc, char *argv[]);
 extern int sacctmgr_list_user(int argc, char *argv[]);
@@ -142,6 +117,7 @@ extern int sacctmgr_delete_association(int argc, char *argv[]);
 extern int sacctmgr_delete_user(int argc, char *argv[]);
 extern int sacctmgr_delete_account(int argc, char *argv[]);
 extern int sacctmgr_delete_cluster(int argc, char *argv[]);
+extern int sacctmgr_delete_coord(int argc, char *argv[]);
 
 /* common.c */
 extern int parse_option_end(char *option);
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 5891753a8beb0313b591d9c3865c5a0e1e04a6df..1daa8de10fea64184b6e9e6df9853df26b7a675e 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -37,7 +37,6 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
-#include "src/sacctmgr/print.h"
 
 static int _set_cond(int *start, int argc, char *argv[],
 		     acct_user_cond_t *user_cond,
@@ -55,6 +54,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 			break;
 		} else if (strncasecmp (argv[i], "WithAssoc", 4) == 0) {
 			user_cond->with_assocs = 1;
+		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
+			continue;
 		} else if(!end) {
 			addto_char_list(user_cond->user_list, argv[i]);
 			addto_char_list(user_cond->assoc_cond->user_list,
@@ -121,6 +122,8 @@ static int _set_rec(int *start, int argc, char *argv[],
 		if (strncasecmp (argv[i], "Where", 5) == 0) {
 			i--;
 			break;
+		} else if(!end && !strncasecmp(argv[i], "set", 3)) {
+			continue;
 		} else if(!end) {
 			printf(" Bad format on %s: End your option with "
 			       "an '=' sign\n", argv[i]);
@@ -129,27 +132,37 @@ static int _set_rec(int *start, int argc, char *argv[],
 				str_2_acct_admin_level(argv[i]+end);
 			u_set = 1;
 		} else if (strncasecmp (argv[i], "DefaultAccount", 1) == 0) {
-			user->default_acct = xstrdup(argv[i]+end);
+			user->default_acct = strip_quotes(argv[i]+end, NULL);
 			u_set = 1;
 		} else if (strncasecmp (argv[i], "FairShare", 1) == 0) {
+			if(!association)
+				continue;
 			if (get_uint(argv[i]+end, &association->fairshare, 
 			    "FairShare") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (strncasecmp (argv[i], "MaxCPUSec", 4) == 0) {
+			if(!association)
+				continue;
 			if (get_uint(argv[i]+end, 
 			     &association->max_cpu_secs_per_job, 
 			    "MaxCPUSec") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (strncasecmp (argv[i], "MaxJobs", 4) == 0) {
+			if(!association)
+				continue;
 			if (get_uint(argv[i]+end, &association->max_jobs, 
 			    "MaxJobs") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (strncasecmp (argv[i], "MaxNodes", 4) == 0) {
+			if(!association)
+				continue;
 			if (get_uint(argv[i]+end,
 			    &association->max_nodes_per_job, 
 			    "MaxNodes") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (strncasecmp (argv[i], "MaxWall", 4) == 0) {
+			if(!association)
+				continue;
 			mins = time_str2mins(argv[i]+end);
 			if (mins != NO_VAL) {
 				association->max_wall_duration_per_job 
@@ -295,9 +308,9 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 			addto_char_list(assoc_cond->cluster_list,
 					argv[i]+end);
 		} else if (strncasecmp (argv[i], "DefaultAccount", 1) == 0) {
-			default_acct = xstrdup(argv[i]+end);
+			default_acct = strip_quotes(argv[i]+end, NULL);
 			addto_char_list(assoc_cond->acct_list,
-					argv[i]+end);
+					default_acct);
 		} else if (strncasecmp (argv[i], "FairShare", 1) == 0) {
 			if (get_uint(argv[i]+end, &fairshare, 
 			    "FairShare") == SLURM_SUCCESS)
@@ -633,8 +646,7 @@ no_default:
 
 	notice_thread_init();
 	if(list_count(user_list)) {
-		rc = acct_storage_g_add_users(db_conn, my_uid, 
-					      user_list);
+		rc = acct_storage_g_add_users(db_conn, my_uid, user_list);
 	}
 
 	if(rc == SLURM_SUCCESS) {
@@ -669,6 +681,93 @@ end_it:
 	return rc;
 }
 
+extern int sacctmgr_add_coord(int argc, char *argv[])
+{
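+	/* Make the given user(s) coordinators of the listed account(s)
+	 * and their sub-accounts; changes are committed only after the
+	 * user confirms. */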
+	int rc = SLURM_SUCCESS;
+	int i=0;
+	int cond_set = 0;
+	acct_user_cond_t *user_cond = xmalloc(sizeof(acct_user_cond_t));
+	char *name = NULL;
+	char *user_str = NULL;
+	char *acct_str = NULL;
+	ListIterator itr = NULL;
+
+	user_cond->user_list = list_create(slurm_destroy_char);
+	user_cond->def_acct_list = list_create(slurm_destroy_char);
+	
+	user_cond->assoc_cond = xmalloc(sizeof(acct_association_cond_t));
+	user_cond->assoc_cond->user_list = list_create(slurm_destroy_char);
+	user_cond->assoc_cond->acct_list = list_create(slurm_destroy_char);
+	user_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char);
+	user_cond->assoc_cond->partition_list = list_create(slurm_destroy_char);
+	user_cond->assoc_cond->fairshare = NO_VAL;
+	user_cond->assoc_cond->max_cpu_secs_per_job = NO_VAL;
+	user_cond->assoc_cond->max_jobs = NO_VAL;
+	user_cond->assoc_cond->max_nodes_per_job = NO_VAL;
+	user_cond->assoc_cond->max_wall_duration_per_job = NO_VAL;
+
+	for (i=0; i<argc; i++) {
+		cond_set = _set_cond(&i, argc, argv, user_cond, NULL);
+	}
+
+	if(!cond_set) {
+		printf(" You need to specify a user list "
+		       "and account list here.\n"); 
+		destroy_acct_user_cond(user_cond);
+		return SLURM_ERROR;
+	}
+
+	itr = list_iterator_create(user_cond->user_list);
+	while((name = list_next(itr))) {
+		xstrfmtcat(user_str, "  %s\n", name);
+
+	}
+	list_iterator_destroy(itr);
+
+	if(!user_str) {
+		printf(" You need to specify a user list "
+		       "and account list here.\n"); 
+		destroy_acct_user_cond(user_cond);
+		return SLURM_ERROR;		
+	}
+	itr = list_iterator_create(user_cond->assoc_cond->acct_list);
+	while((name = list_next(itr))) {
+		xstrfmtcat(acct_str, "  %s\n", name);
+
+	}
+	list_iterator_destroy(itr);
+	if(!acct_str) {
+		printf(" You need to specify a user list "
+		       "and account list here.\n"); 
+		destroy_acct_user_cond(user_cond);
+		return SLURM_ERROR;		
+	}
+
+	printf(" Adding Coordinator User(s)\n%s", user_str);
+	printf(" To Account(s) and all sub-accounts\n%s", acct_str);
+		
+	notice_thread_init();
+	rc = acct_storage_g_add_coord(db_conn, my_uid, 
+				      user_cond->assoc_cond->acct_list,
+				      user_cond);
+	notice_thread_fini();
+	destroy_acct_user_cond(user_cond);
+		
+	if(rc == SLURM_SUCCESS) {
+		if(commit_check("Would you like to commit changes?")) {
+			acct_storage_g_commit(db_conn, 1);
+		} else {
+			printf(" Changes Discarded\n");
+			acct_storage_g_commit(db_conn, 0);
+		}
+	} else {
+		printf(" error: Problem adding coordinator\n");
+		rc = SLURM_ERROR;
+	}
+
+	return rc;
+}
+
 extern int sacctmgr_list_user(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
@@ -728,6 +827,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 	destroy_acct_user_cond(user_cond);
 
 	if(!user_list) {
+		printf(" Problem with query.\n");
 		list_destroy(format_list);
 		return SLURM_ERROR;
 	}
@@ -741,72 +841,72 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 			field->type = PRINT_ACCOUNT;
 			field->name = xstrdup("Account");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("AdminLevel", object, 2)) {
 			field->type = PRINT_ADMIN;
 			field->name = xstrdup("Admin");
 			field->len = 9;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Cluster", object, 1)) {
 			field->type = PRINT_CLUSTER;
 			field->name = xstrdup("Cluster");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Default", object, 1)) {
 			field->type = PRINT_DACCT;
 			field->name = xstrdup("Def Acct");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("FairShare", object, 1)) {
 			field->type = PRINT_FAIRSHARE;
 			field->name = xstrdup("FairShare");
 			field->len = 9;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("ID", object, 1)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
 			field->len = 6;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxCPUSecs", object, 4)) {
 			field->type = PRINT_MAXC;
 			field->name = xstrdup("MaxCPUSecs");
 			field->len = 11;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
 			field->name = xstrdup("MaxJobs");
 			field->len = 7;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxNodes", object, 4)) {
 			field->type = PRINT_MAXN;
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
-			field->print_routine = print_time;
+			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("QOS", object, 1)) {
 			field->type = PRINT_QOS;
 			field->name = xstrdup("QOS");
 			field->len = 9;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("ParentID", object, 7)) {
 			field->type = PRINT_PID;
 			field->name = xstrdup("Par ID");
 			field->len = 6;
-			field->print_routine = print_uint;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("Partition", object, 4)) {
 			field->type = PRINT_PART;
 			field->name = xstrdup("Partition");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("User", object, 1)) {
 			field->type = PRINT_USER;
 			field->name = xstrdup("User");
 			field->len = 10;
-			field->print_routine = print_str;
+			field->print_routine = print_fields_str;
 		} else {
 			printf("Unknown field '%s'\n", object);
 			xfree(field);
@@ -818,7 +918,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 
 	itr = list_iterator_create(user_list);
 	itr2 = list_iterator_create(print_fields_list);
-	print_header(print_fields_list);
+	print_fields_header(print_fields_list);
 
 	while((user = list_next(itr))) {
 		if(user->assoc_list && list_count(user->assoc_list)) {
@@ -1015,8 +1115,6 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 		}
 	}
 
-	printf("\n");
-
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
 	list_destroy(user_list);
@@ -1101,7 +1199,6 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 				printf("  %s\n", object);
 			}
 			list_iterator_destroy(itr);
-			list_destroy(ret_list);
 			set = 1;
 		} else if(ret_list) {
 			printf(" Nothing modified\n");
@@ -1222,3 +1319,110 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 
 	return rc;
 }
+
+extern int sacctmgr_delete_coord(int argc, char *argv[])
+{
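+	/* Remove coordinator privileges for the given user(s) and/or
+	 * account(s); changes are committed only after the user confirms. */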
+	int rc = SLURM_SUCCESS;
+	int i=0, set=0;
+	int cond_set = 0;
+	acct_user_cond_t *user_cond = xmalloc(sizeof(acct_user_cond_t));
+	char *name = NULL;
+	char *user_str = NULL;
+	char *acct_str = NULL;
+	ListIterator itr = NULL;
+	List ret_list = NULL;
+
+	user_cond->user_list = list_create(slurm_destroy_char);
+	user_cond->def_acct_list = list_create(slurm_destroy_char);
+	
+	user_cond->assoc_cond = xmalloc(sizeof(acct_association_cond_t));
+	user_cond->assoc_cond->user_list = list_create(slurm_destroy_char);
+	user_cond->assoc_cond->acct_list = list_create(slurm_destroy_char);
+	user_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char);
+	user_cond->assoc_cond->partition_list = list_create(slurm_destroy_char);
+	user_cond->assoc_cond->fairshare = NO_VAL;
+	user_cond->assoc_cond->max_cpu_secs_per_job = NO_VAL;
+	user_cond->assoc_cond->max_jobs = NO_VAL;
+	user_cond->assoc_cond->max_nodes_per_job = NO_VAL;
+	user_cond->assoc_cond->max_wall_duration_per_job = NO_VAL;
+
+	for (i=0; i<argc; i++) {
+		cond_set = _set_cond(&i, argc, argv, user_cond, NULL);
+	}
+
+	if(!cond_set) {
+		printf(" You need to specify a user list "
+		       "or account list here.\n"); 
+		destroy_acct_user_cond(user_cond);
+		return SLURM_ERROR;
+	}
+
+	itr = list_iterator_create(user_cond->user_list);
+	while((name = list_next(itr))) {
+		xstrfmtcat(user_str, "  %s\n", name);
+
+	}
+	list_iterator_destroy(itr);
+
+	itr = list_iterator_create(user_cond->assoc_cond->acct_list);
+	while((name = list_next(itr))) {
+		xstrfmtcat(acct_str, "  %s\n", name);
+
+	}
+	list_iterator_destroy(itr);
+	if(!user_str && !acct_str) {
+		printf(" You need to specify a user list "
+		       "or an account list here.\n"); 
+		destroy_acct_user_cond(user_cond);
+		return SLURM_ERROR;		
+	}
+	/* FIX ME: This list should be received from the slurmdbd rather
+	 * than just assumed, but that does not yet work correctly,
+	 * which is why we report it this way for now.
+	 */
+	if(user_str) {
+		printf(" Removing Coordinators with user name\n%s", user_str);
+		if(acct_str)
+			printf(" From Account(s)\n%s", acct_str);
+		else
+			printf(" From all accounts\n");
+	} else 
+		printf(" Removing all users from Accounts\n%s", acct_str);
+		
+	notice_thread_init();
+	ret_list = acct_storage_g_remove_coord(db_conn, my_uid,
+					       user_cond->assoc_cond->acct_list,
+					       user_cond);
+	destroy_acct_user_cond(user_cond);
+
+	if(ret_list && list_count(ret_list)) {
+		char *object = NULL;
+		ListIterator itr = list_iterator_create(ret_list);
+		printf(" Removed Coordinators (sub accounts not listed)...\n");
+		while((object = list_next(itr))) {
+			printf("  %s\n", object);
+		}
+		list_iterator_destroy(itr);
+		set = 1;
+	} else if(ret_list) {
+		printf(" Nothing removed\n");
+	} else {
+		printf(" Error with request\n");
+		rc = SLURM_ERROR;
+	}
+
+	if(ret_list)
+		list_destroy(ret_list);
+	notice_thread_fini();
+	if(set) {
+		if(commit_check("Would you like to commit changes?")) 
+			acct_storage_g_commit(db_conn, 1);
+		else {
+			printf(" Changes Discarded\n");
+			acct_storage_g_commit(db_conn, 0);
+		}
+	}
+
+	return rc;
+}
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index 652c91eb6d21fdbf17ebb815551000ed82574dff..7e5992c186fd59c2364f8fa776a06d93d5908261 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -280,7 +280,7 @@ static void _opt_default()
 		opt.geometry[i]	    = (uint16_t) NO_VAL;
 	opt.reboot          = false;
 	opt.no_rotate	    = false;
-	opt.conn_type	    = -1;
+	opt.conn_type	    = (uint16_t) NO_VAL;
 
 	opt.euid	    = (uid_t) -1;
 	opt.egid	    = (gid_t) -1;
@@ -1282,7 +1282,7 @@ static void _opt_list()
 	str = print_constraints();
 	info("constraints    : %s", str);
 	xfree(str);
-	if (opt.conn_type >= 0)
+	if (opt.conn_type != (uint16_t) NO_VAL)
 		info("conn_type      : %u", opt.conn_type);
 	str = print_geometry(opt.geometry);
 	info("geometry       : %s", str);
diff --git a/src/salloc/opt.h b/src/salloc/opt.h
index 3f964a93797c0e0f3b5d8d3b21d343ea0011f51b..46138a9ece48778697c2f0fe060a1dde21a6cc8a 100644
--- a/src/salloc/opt.h
+++ b/src/salloc/opt.h
@@ -119,7 +119,7 @@ typedef struct salloc_options {
 	uint16_t geometry[SYSTEM_DIMENSIONS]; /* --geometry, -g	*/
 	bool reboot;		/* --reboot			*/
 	bool no_rotate;		/* --no_rotate, -R		*/
-	int16_t conn_type;	/* --conn-type 			*/
+	uint16_t conn_type;	/* --conn-type 			*/
 	char *blrtsimage;       /* --blrts-image BlrtsImage for block */
 	char *linuximage;       /* --linux-image LinuxImage for block */
 	char *mloaderimage;     /* --mloader-image mloaderImage for block */
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index df7531c47e3b62143a97b733d1e587143bcb87ba..be782a7f31e8eb5f196b0f3bcc81159b1962f383 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -331,7 +331,7 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 			desc->geometry[i] = opt.geometry[i];
 	}
 #endif
-	if (opt.conn_type != -1)
+	if (opt.conn_type != (uint16_t)NO_VAL)
 		desc->conn_type = opt.conn_type;
 	if (opt.reboot)
 		desc->reboot = 1;
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index 1b46e78dd1ba775e2186dbcd8c866fd9b1744e16..b0da0b4dc6e362525e941060fb164bef2deee6ef 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -1,9 +1,10 @@
 /*****************************************************************************\
  *  sbatch.c - Submit a SLURM batch script.
  *
- *  $Id: sbatch.c 14068 2008-05-19 15:58:22Z jette $
+ *  $Id: sbatch.c 14226 2008-06-10 21:12:53Z jette $
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
+ *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
  *  LLNL-CODE-402394.
@@ -263,16 +264,9 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 
 	desc->environment = NULL;
 	if (opt.get_user_env_time >= 0) {
-		struct passwd *pw = NULL;
-		pw = getpwuid(opt.uid);
-		if (pw != NULL) {
-			desc->environment = env_array_user_default(
-						pw->pw_name,
-						opt.get_user_env_time,
-						opt.get_user_env_mode);
-			if (desc->environment == NULL)
-				exit(1);	/* error already logged */
-		}
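+		/* Instead of building the user's login environment here
+		 * (formerly via env_array_user_default()), set a marker so
+		 * it can be constructed later. */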
+		desc->environment = env_array_create();
+		env_array_overwrite(&desc->environment,
+				    "SLURM_GET_USER_ENV", "1");
 	}
 	env_array_merge(&desc->environment, (const char **)environ);
 	desc->env_size = envcount (desc->environment);
diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c
index 5ffd7091c7413ee6f0534d62ba003ee11b6d40a6..9106ba5164e85ef0d4e81b5498ae631f4ca40573 100644
--- a/src/scontrol/scontrol.c
+++ b/src/scontrol/scontrol.c
@@ -3,6 +3,8 @@
  *	provides interface to read, write, update, and configurations.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  LLNL-CODE-402394.
@@ -419,6 +421,9 @@ _print_daemons (void)
 	if ((n = slurm_conf_get_nodename(me))) {
 		d = 1;
 		xfree(n);
+	} else if ((n = slurm_conf_get_aliased_nodename())) {
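+		/* the node may be configured under an alias of the
+		 * local hostname */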
+		d = 1;
+		xfree(n);
 	} else if ((n = slurm_conf_get_nodename("localhost"))) {
 		d = 1;
 		xfree(n);
@@ -526,7 +531,8 @@ _process_command (int argc, char *argv[])
 		}
 		_print_ping ();
 	}
-	else if (strncasecmp (argv[0], "quiet", 4) == 0) {
+	else if ((strncasecmp (argv[0], "\\q", 2) == 0) ||
+		 (strncasecmp (argv[0], "quiet", 4) == 0)) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, "too many arguments for keyword:%s\n",
diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c
index 42239d1adb733c963eb7076bfeffb972fb4ec903..13d797714fcda83808a558895c764278e4ae2510 100644
--- a/src/sinfo/sinfo.c
+++ b/src/sinfo/sinfo.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  sinfo.c - Report overall state the system
  *
- *  $Id: sinfo.c 13929 2008-04-23 16:11:29Z jette $
+ *  $Id: sinfo.c 14203 2008-06-06 16:58:44Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -101,8 +101,12 @@ int main(int argc, char *argv[])
 	List sinfo_list = NULL;
 	int rc = 0;
 
-	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_DAEMON, NULL);
+	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_USER, NULL);
 	parse_command_line(argc, argv);
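+	/* increase the stderr logging level by the requested verbosity */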
+	if (params.verbose) {
+		opts.stderr_level += params.verbose;
+		log_alter(opts, SYSLOG_FACILITY_USER, NULL);
+	}
 
 	while (1) {
 		if ((!params.no_header)
@@ -254,7 +258,7 @@ _query_server(partition_info_msg_t ** part_pptr,
 		    slurm_load_partitions((time_t) NULL, &new_part_ptr,
 					  show_flags);
 	if (error_code) {
-		slurm_perror("slurm_load_part");
+		slurm_perror("slurm_load_partitions");
 		return error_code;
 	}
 
diff --git a/src/slurmctld/Makefile.am b/src/slurmctld/Makefile.am
index 34db68f904dd88f3e2ed031bda5b3f1c5b56ccbd..584b4a2e3b93aa7855d0311df34d10cdcc4d8de5 100644
--- a/src/slurmctld/Makefile.am
+++ b/src/slurmctld/Makefile.am
@@ -14,6 +14,8 @@ slurmctld_LDADD = 					\
 
 
 slurmctld_SOURCES = 	\
+	acct_policy.c	\
+	acct_policy.h	\
 	agent.c  	\
 	agent.h		\
 	backup.c	\
diff --git a/src/slurmctld/Makefile.in b/src/slurmctld/Makefile.in
index c76c5dba49703a07192518c9b0db5edbd5e57b71..f846590c02a58c71b058b2569141e8635c084e14 100644
--- a/src/slurmctld/Makefile.in
+++ b/src/slurmctld/Makefile.in
@@ -71,14 +71,14 @@ CONFIG_CLEAN_FILES =
 am__installdirs = "$(DESTDIR)$(sbindir)"
 sbinPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(sbin_PROGRAMS)
-am_slurmctld_OBJECTS = agent.$(OBJEXT) backup.$(OBJEXT) \
-	controller.$(OBJEXT) job_mgr.$(OBJEXT) job_scheduler.$(OBJEXT) \
-	licenses.$(OBJEXT) locks.$(OBJEXT) node_mgr.$(OBJEXT) \
-	node_scheduler.$(OBJEXT) partition_mgr.$(OBJEXT) \
-	ping_nodes.$(OBJEXT) power_save.$(OBJEXT) proc_req.$(OBJEXT) \
-	read_config.$(OBJEXT) sched_plugin.$(OBJEXT) \
-	srun_comm.$(OBJEXT) state_save.$(OBJEXT) step_mgr.$(OBJEXT) \
-	trigger_mgr.$(OBJEXT)
+am_slurmctld_OBJECTS = acct_policy.$(OBJEXT) agent.$(OBJEXT) \
+	backup.$(OBJEXT) controller.$(OBJEXT) job_mgr.$(OBJEXT) \
+	job_scheduler.$(OBJEXT) licenses.$(OBJEXT) locks.$(OBJEXT) \
+	node_mgr.$(OBJEXT) node_scheduler.$(OBJEXT) \
+	partition_mgr.$(OBJEXT) ping_nodes.$(OBJEXT) \
+	power_save.$(OBJEXT) proc_req.$(OBJEXT) read_config.$(OBJEXT) \
+	sched_plugin.$(OBJEXT) srun_comm.$(OBJEXT) \
+	state_save.$(OBJEXT) step_mgr.$(OBJEXT) trigger_mgr.$(OBJEXT)
 slurmctld_OBJECTS = $(am_slurmctld_OBJECTS)
 slurmctld_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \
 	$(top_builddir)/src/common/libcommon.o
@@ -277,6 +277,8 @@ slurmctld_LDADD = \
 	$(top_builddir)/src/common/libcommon.o -ldl
 
 slurmctld_SOURCES = \
+	acct_policy.c	\
+	acct_policy.h	\
 	agent.c  	\
 	agent.h		\
 	backup.c	\
@@ -382,6 +384,7 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/acct_policy.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backup.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/controller.Po@am__quote@
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
new file mode 100644
index 0000000000000000000000000000000000000000..10daf04456c824dafad48b0aca730f69b3dffad9
--- /dev/null
+++ b/src/slurmctld/acct_policy.c
@@ -0,0 +1,135 @@
+/*****************************************************************************\
+ *  acct_policy.c - Enforce accounting policy
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <slurm/slurm_errno.h>
+
+#include "src/common/assoc_mgr.h"
+#include "src/common/slurm_accounting_storage.h"
+
+#include "src/slurmctld/slurmctld.h"
+
+#define _DEBUG 0
+
+static bool _valid_job_assoc(struct job_record *job_ptr)
+{
+	acct_association_rec_t assoc_rec, *assoc_ptr;
+
+	assoc_ptr = job_ptr->assoc_ptr;
+	if ((assoc_ptr == NULL) ||
+	    (assoc_ptr->id != job_ptr->assoc_id) ||
+	    (assoc_ptr->uid != job_ptr->user_id)) {
+		error("Invalid assoc_ptr for jobid=%u", job_ptr->job_id);
+		bzero(&assoc_rec, sizeof(acct_association_rec_t));
+		assoc_rec.uid       = job_ptr->user_id;
+		assoc_rec.partition = job_ptr->partition;
+		assoc_rec.acct      = job_ptr->account;
+		if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
+					    accounting_enforce, &assoc_ptr)) {
+			info("_valid_job_assoc: invalid account or "
+			     "partition for uid=%u jobid=%u",
+			     job_ptr->user_id, job_ptr->job_id);
+			return false;
+		}
+		job_ptr->assoc_id = assoc_rec.id;
+		job_ptr->assoc_ptr = (void *) assoc_ptr;
+	}
+	return true;
+}
+
+/*
+ * acct_policy_job_begin - Note that a job is starting for accounting
+ *	policy purposes.
+ */
+extern void acct_policy_job_begin(struct job_record *job_ptr)
+{
+	acct_association_rec_t *assoc_ptr;
+
+	if (!accounting_enforce || !_valid_job_assoc(job_ptr))
+		return;
+
+	assoc_ptr = job_ptr->assoc_ptr;
+	assoc_ptr->used_jobs++;
+}
+
+/*
+ * acct_policy_job_fini - Note that a job is completing for accounting
+ *	policy purposes.
+ */
+extern void acct_policy_job_fini(struct job_record *job_ptr)
+{
+	acct_association_rec_t *assoc_ptr;
+
+	if (!accounting_enforce || !_valid_job_assoc(job_ptr))
+		return;
+
+	assoc_ptr = job_ptr->assoc_ptr;
+	if (assoc_ptr->used_jobs)
+		assoc_ptr->used_jobs--;
+	else
+		error("acct_policy_job_fini: used_jobs underflow");
+}
+
+/*
+ * acct_policy_job_runnable - Determine if the specified job
+ *	can execute right now or not depending upon accounting
+ *	policy (e.g. running job limit for this association).
+ */
+extern bool acct_policy_job_runnable(struct job_record *job_ptr)
+{
+	acct_association_rec_t *assoc_ptr;
+
+	if (!accounting_enforce)
+		return true;
+	if (!_valid_job_assoc(job_ptr))
+		return false;
+
+	assoc_ptr = job_ptr->assoc_ptr;
+#if _DEBUG
+	info("acct_job_limits: %u of %u", 
+	     assoc_ptr->used_jobs, assoc_ptr->max_jobs);
+#endif
+
+	if ((assoc_ptr->max_jobs != NO_VAL) &&
+	    (assoc_ptr->max_jobs != INFINITE) &&
+	    (assoc_ptr->used_jobs >= assoc_ptr->max_jobs))
+		return false;
+	return true;
+}
diff --git a/src/slurmctld/acct_policy.h b/src/slurmctld/acct_policy.h
new file mode 100644
index 0000000000000000000000000000000000000000..6bed6308c816f4403e41e2a8711b77d17ba7f8a1
--- /dev/null
+++ b/src/slurmctld/acct_policy.h
@@ -0,0 +1,60 @@
+/*****************************************************************************\
+ *  acct_policy.h - definitions of functions in acct_policy.c
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette@llnl.gov> et al.
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _HAVE_ACCT_POLICY_H
+#define _HAVE_ACCT_POLICY_H
+
+/*
+ * acct_policy_job_begin - Note that a job is starting for accounting
+ *	policy purposes.
+ */
+extern void acct_policy_job_begin(struct job_record *job_ptr);
+
+/*
+ * acct_policy_job_fini - Note that a job is completing for accounting
+ *	policy purposes.
+ */
+extern void acct_policy_job_fini(struct job_record *job_ptr);
+
+/*
+ * acct_policy_job_runnable - Determine if the specified job
+ *	can execute right now or not depending upon accounting
+ *	policy (e.g. running job limit for this association).
+ */
+extern bool acct_policy_job_runnable(struct job_record *job_ptr);
+
+#endif /* !_HAVE_ACCT_POLICY_H */
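
The acct_policy_job_begin/fini/runnable trio above implements the per-association running-job limit: a counter is incremented when a job starts, decremented when it finishes, and a job is held whenever the counter has reached max_jobs (NO_VAL or INFINITE disables the check). The following minimal, self-contained C sketch mirrors that counting logic with stand-in types; it is an illustration, not the real slurmctld data structures.

/* Sketch of the per-association running-job limit enforced by
 * acct_policy.c; the struct and sentinel values are illustrative
 * stand-ins, not the real acct_association_rec_t. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NO_VAL   0xfffffffe
#define INFINITE 0xffffffff

struct assoc {                /* stand-in for acct_association_rec_t */
	uint32_t max_jobs;    /* limit on concurrently running jobs  */
	uint32_t used_jobs;   /* currently running jobs              */
};

static bool job_runnable(const struct assoc *a)
{
	if ((a->max_jobs != NO_VAL) && (a->max_jobs != INFINITE) &&
	    (a->used_jobs >= a->max_jobs))
		return false;
	return true;
}

static void job_begin(struct assoc *a) { a->used_jobs++; }

static void job_fini(struct assoc *a)
{
	if (a->used_jobs)
		a->used_jobs--;
	else
		fprintf(stderr, "used_jobs underflow\n");
}

int main(void)
{
	struct assoc a = { .max_jobs = 2, .used_jobs = 0 };

	for (int i = 0; i < 4; i++) {
		if (job_runnable(&a)) {
			job_begin(&a);
			printf("job %d started (%u of %u running)\n", i,
			       (unsigned) a.used_jobs, (unsigned) a.max_jobs);
		} else {
			printf("job %d held: association limit reached\n", i);
		}
	}
	job_fini(&a);	/* one job completes, freeing a slot */
	printf("after completion: %u of %u running\n",
	       (unsigned) a.used_jobs, (unsigned) a.max_jobs);
	return 0;
}
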
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 0e8f00bad41f3087b1eed8c1ec8b782e03b5f037..b6903a886f404426a9f774435b89e87b2302f09c 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -173,6 +173,7 @@ static void         _init_config(void);
 static void         _init_pidfile(void);
 static void         _kill_old_slurmctld(void);
 static void         _parse_commandline(int argc, char *argv[]);
+static void         _remove_assoc(acct_association_rec_t *rec);
 inline static int   _report_locks_set(void);
 static void *       _service_connection(void *arg);
 static int          _shutdown_backup_controller(int wait_time);
@@ -196,6 +197,7 @@ int main(int argc, char *argv[])
 	/* Locks: Write configuration, job, node, and partition */
 	slurmctld_lock_t config_write_lock = {
 		WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK };
+	assoc_init_args_t assoc_init_arg;
 
 	/*
 	 * Establish initial configuration
@@ -297,7 +299,9 @@ int main(int argc, char *argv[])
 	slurmctld_cluster_name = xstrdup(slurmctld_conf.cluster_name);
 	accounting_enforce = slurmctld_conf.accounting_storage_enforce;
 	acct_db_conn = acct_storage_g_get_connection(true, false);
-	if (assoc_mgr_init(acct_db_conn, accounting_enforce) &&
+	assoc_init_arg.enforce = accounting_enforce;
+	assoc_init_arg.remove_assoc_notify = _remove_assoc;
+	if (assoc_mgr_init(acct_db_conn, &assoc_init_arg) &&
 	    accounting_enforce) {
 		error("assoc_mgr_init failure");
 		fatal("slurmdbd and/or database must be up at "
@@ -375,11 +379,11 @@ int main(int argc, char *argv[])
 			    (!stat("/tmp/slurm_accounting_first", &stat_buf))) {
 				/* When first starting to write node state
 				 * information to Gold or SlurmDBD, create 
-				 * a file called "/tmp/slurm_accounting_first" to 
-				 * capture node initialization information */
-		   
+				 * a file called "/tmp/slurm_accounting_first"  
+				 * to capture node initialization information */
+				
 				_accounting_mark_all_nodes_down("cold-start");
-				 unlink("/tmp/slurm_accounting_first");
+				unlink("/tmp/slurm_accounting_first");
 			}
 		} else {
 			error("this host (%s) not valid controller (%s or %s)",
@@ -391,7 +395,7 @@ int main(int argc, char *argv[])
 		if(!acct_db_conn) {
 			acct_db_conn = 
 				acct_storage_g_get_connection(true, false);
-			if (assoc_mgr_init(acct_db_conn, accounting_enforce) &&
+			if (assoc_mgr_init(acct_db_conn, &assoc_init_arg) &&
 			    accounting_enforce) {
 				error("assoc_mgr_init failure");
 				fatal("slurmdbd and/or database must be up at "
@@ -496,12 +500,6 @@ int main(int argc, char *argv[])
 	if (i >= 10)
 		error("Left %d agent threads active", cnt);
 
-	/* Purge our local data structures */
-	job_fini();
-	part_fini();	/* part_fini() must preceed node_fini() */
-	node_fini();
-	trigger_fini();
-
 	/* Plugins are needed to purge job/node data structures,
 	 * unplug after other data structures are purged */
 	g_slurm_jobcomp_fini();
@@ -514,6 +512,12 @@ int main(int argc, char *argv[])
 	switch_fini();
 	assoc_mgr_fini();
 
+	/* Purge our local data structures */
+	job_fini();
+	part_fini();	/* part_fini() must precede node_fini() */
+	node_fini();
+	trigger_fini();
+
 	/* purge remaining data structures */
 	slurm_cred_ctx_destroy(slurmctld_config.cred_ctx);
 	slurm_crypto_fini();	/* must be after ctx_destroy */
@@ -948,6 +952,21 @@ static int _accounting_mark_all_nodes_down(char *reason)
 	}
 	return rc;
 }
+
+static void _remove_assoc(acct_association_rec_t *rec)
+{
+	int cnt = 0;
+
+	if (accounting_enforce)
+		cnt = job_cancel_by_assoc_id(rec->id);
+
+	if (cnt) {
+		info("Removed association id:%u user:%s, cancelled %d jobs",
+		     rec->id, rec->user, cnt);
+	} else
+		debug("Removed association id:%u user:%s", rec->id, rec->user);
+}
+
 /*
  * _slurmctld_background - process slurmctld background activities
  *	purge defunct job records, save state, schedule jobs, and 
@@ -1008,8 +1027,9 @@ static void *_slurmctld_background(void *no_data)
 	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
 	debug3("_slurmctld_background pid = %u", getpid());
 
-	while (slurmctld_config.shutdown_time == 0) {
-		sleep(1);
+	while (1) {
+		if (slurmctld_config.shutdown_time == 0)
+			sleep(1);
 
 		now = time(NULL);
 		START_TIMER;
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 918c053e2805278b713cb97e895c9afd3319fa24..1dada0c7925e8441f4fcbc4f07af32ef1ed7614f 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -3,7 +3,7 @@
  *	Note: there is a global job list (job_list), time stamp 
  *	(last_job_update), and hash table (job_hash)
  *
- *  $Id: job_mgr.c 14154 2008-05-29 17:51:52Z jette $
+ *  $Id: job_mgr.c 14311 2008-06-23 18:55:55Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -371,7 +371,7 @@ int dump_all_job_state(void)
  */
 extern int load_all_job_state(void)
 {
-	int data_allocated, data_read = 0, error_code = 0;
+	int data_allocated, data_read = 0, error_code = SLURM_SUCCESS;
 	uint32_t data_size = 0;
 	int state_fd, job_cnt = 0;
 	char *data = NULL, *state_file;
@@ -414,11 +414,11 @@ extern int load_all_job_state(void)
 	xfree(state_file);
 	unlock_state_files();
 
-	if (job_id_sequence == 0)
-		job_id_sequence = slurmctld_conf.first_job_id;
+	job_id_sequence = MAX(job_id_sequence, slurmctld_conf.first_job_id);
+	if (error_code)
+		return error_code;
 
 	buffer = create_buf(data, data_size);
-
 	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
 	debug3("Version string in job_state header is %s", ver_str);
 	if ((!ver_str) || strcmp(ver_str, JOB_STATE_VERSION) != 0) {
@@ -446,12 +446,12 @@ extern int load_all_job_state(void)
 	debug3("Set job_id_sequence to %u", job_id_sequence);
 
 	free_buf(buffer);
-	info("Recovered state of %d jobs", job_cnt);
+	info("Recovered information about %d jobs", job_cnt);
 	return error_code;
 
 unpack_error:
 	error("Incomplete job data checkpoint file");
-	info("State of %d jobs recovered", job_cnt);
+	info("Recovered information about %d jobs", job_cnt);
 	free_buf(buffer);
 	return SLURM_FAILURE;
 }
@@ -549,6 +549,7 @@ static int _load_job_state(Buf buffer)
 	uint32_t exit_code, num_procs, assoc_id, db_index, name_len,
 		total_procs;
 	time_t start_time, end_time, suspend_time, pre_sus_time, tot_sus_time;
+	time_t now = time(NULL);
 	uint16_t job_state, next_step_id, details, batch_flag, step_flag;
 	uint16_t kill_on_node_fail, kill_on_step_done, qos;
 	uint16_t alloc_resp_port, other_port, mail_type, state_reason;
@@ -560,6 +561,7 @@ static int _load_job_state(Buf buffer)
 	struct part_record *part_ptr;
 	int error_code;
 	select_jobinfo_t select_jobinfo = NULL;
+	acct_association_rec_t assoc_rec, *assoc_ptr = NULL;
 
 	safe_unpack32(&assoc_id, buffer);
 	safe_unpack32(&job_id, buffer);
@@ -668,7 +670,7 @@ static int _load_job_state(Buf buffer)
 		job_ptr->job_state = JOB_FAILED;
 		job_ptr->exit_code = 1;
 		job_ptr->state_reason = FAIL_SYSTEM;
-		job_ptr->end_time = time(NULL);
+		job_ptr->end_time = now;
 		goto unpack_error;
 	}
 
@@ -730,12 +732,33 @@ static int _load_job_state(Buf buffer)
 	job_ptr->start_time   = start_time;
 	job_ptr->state_reason = state_reason;
 	job_ptr->suspend_time = suspend_time;
-	job_ptr->time_last_active = time(NULL);
+	job_ptr->time_last_active = now;
 	job_ptr->time_limit   = time_limit;
 	job_ptr->total_procs  = total_procs;
 	job_ptr->tot_sus_time = tot_sus_time;
 	job_ptr->user_id      = user_id;
-	info("recovered job id %u", job_id);
+
+	bzero(&assoc_rec, sizeof(acct_association_rec_t));
+	assoc_rec.acct      = job_ptr->account;
+	assoc_rec.partition = job_ptr->partition;
+	assoc_rec.uid       = job_ptr->user_id;
+
+	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
+				    accounting_enforce,
+				    &assoc_ptr) &&
+	    accounting_enforce && (!IS_JOB_FINISHED(job_ptr))) {
+		info("Cancelling job %u with invalid association",
+		     job_id);
+		job_ptr->job_state = JOB_CANCELLED;
+		job_ptr->state_reason = FAIL_BANK_ACCOUNT;
+		if (IS_JOB_PENDING(job_ptr))
+			job_ptr->start_time = now;
+		job_ptr->end_time = now;
+		jobacct_storage_g_job_complete(acct_db_conn, job_ptr);
+	} else {
+		info("Recovered job %u", job_id);
+		job_ptr->assoc_ptr = (void *) assoc_ptr;
+	}
 
 	safe_unpack16(&step_flag, buffer);
 	while (step_flag == STEP_FLAG) {
@@ -894,6 +917,8 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer)
 	job_ptr->details->acctg_freq = acctg_freq;
 	job_ptr->details->contiguous = contiguous;
 	job_ptr->details->cpus_per_task = cpus_per_task;
+	/* FIXME: Need to save/restore actual task_dist value */
+	job_ptr->details->task_dist = SLURM_DIST_CYCLIC;
 	job_ptr->details->ntasks_per_node = ntasks_per_node;
 	job_ptr->details->job_min_procs = job_min_procs;
 	job_ptr->details->job_min_memory = job_min_memory;
@@ -994,8 +1019,16 @@ extern int kill_job_by_part_name(char *part_name)
 			continue;
 		job_ptr->part_ptr = NULL;
 
-		if (job_ptr->job_state == JOB_SUSPENDED)
+		if (job_ptr->job_state == JOB_SUSPENDED) {
+			enum job_states suspend_job_state = job_ptr->job_state;
+			/* we can't have it as suspended when we call the
+			 * accounting stuff.
+			 */
+			job_ptr->job_state = JOB_CANCELLED;
+			jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
+			job_ptr->job_state = suspend_job_state;
 			suspended = true;
+		}
 		if ((job_ptr->job_state == JOB_RUNNING) 
 		    || (job_ptr->job_state == JOB_PENDING)
 		    || suspended) {
@@ -1059,8 +1092,17 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 		if ((job_ptr->node_bitmap == NULL) ||
 		    (!bit_test(job_ptr->node_bitmap, bit_position)))
 			continue;	/* job not on this node */
-		if (job_ptr->job_state == JOB_SUSPENDED)
+		if (job_ptr->job_state == JOB_SUSPENDED) {
+			enum job_states suspend_job_state = job_ptr->job_state;
+			/* we can't have it as suspended when we call the
+			 * accounting stuff.
+			 */
+			job_ptr->job_state = JOB_CANCELLED;
+			jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
+			job_ptr->job_state = suspend_job_state;
 			suspended = true;
+		}
+
 		if (job_ptr->job_state & JOB_COMPLETING) {
 			job_count++;
 			bit_clear(job_ptr->node_bitmap, bit_position);
@@ -1109,14 +1151,17 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 				slurm_sched_requeue(job_ptr, requeue_msg);
 				job_ptr->time_last_active  = now;
 				if (suspended) {
-					job_ptr->end_time = job_ptr->suspend_time;
+					job_ptr->end_time = 
+						job_ptr->suspend_time;
 					job_ptr->tot_sus_time += 
-						difftime(now, job_ptr->suspend_time);
+						difftime(now,
+							 job_ptr->suspend_time);
 				} else
 					job_ptr->end_time = now;
 				
-				/* We want this job to look like it was cancelled in the
-				 * accounting logs. Set a new submit time so the restarted
+				/* We want this job to look like it
+				 * was cancelled in the accounting
+				 * logs. Set a new submit time so the restarted
 				 * job looks like a new job. */
 				job_ptr->job_state  = JOB_CANCELLED;
 				deallocate_nodes(job_ptr, false, suspended);
@@ -1482,6 +1527,7 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
  
 	if ((error_code == ESLURM_NODES_BUSY) ||
 	    (error_code == ESLURM_JOB_HELD) ||
+	    (error_code == ESLURM_ACCOUNTING_POLICY) ||
 	    (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)) {
 		/* Not fatal error, but job can't be scheduled right now */
 		if (immediate) {
@@ -1491,7 +1537,8 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 			job_ptr->start_time = job_ptr->end_time = now;
 			job_completion_logger(job_ptr);
 		} else {	/* job remains queued */
-			if (error_code == ESLURM_NODES_BUSY) {
+			if ((error_code == ESLURM_NODES_BUSY) ||
+			    (error_code == ESLURM_ACCOUNTING_POLICY)) {
 				error_code = SLURM_SUCCESS;
 			}
 		}
@@ -1534,8 +1581,17 @@ extern int job_fail(uint32_t job_id)
 
 	if (IS_JOB_FINISHED(job_ptr))
 		return ESLURM_ALREADY_DONE;
-	if (job_ptr->job_state == JOB_SUSPENDED)
+	if (job_ptr->job_state == JOB_SUSPENDED) {
+		enum job_states suspend_job_state = job_ptr->job_state;
+		/* we can't have it as suspended when we call the
+		 * accounting stuff.
+		 */
+		job_ptr->job_state = JOB_CANCELLED;
+		jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
+		job_ptr->job_state = suspend_job_state;
 		suspended = true;
+	}
+
 	if ((job_ptr->job_state == JOB_RUNNING) || suspended) {
 		/* No need to signal steps, deallocate kills them */
 		job_ptr->time_last_active       = now;
@@ -1576,6 +1632,18 @@ extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
 	struct job_record *job_ptr;
 	time_t now = time(NULL);
 	bool super_user;
+	static bool wiki2_sched = false;
+	static bool wiki2_sched_test = false;
+
+	/* Jobs submitted using Moab command should be cancelled using
+	 * Moab command for accurate job records */
+	if (!wiki2_sched_test) {
+		char *sched_type = slurm_get_sched_type();
+		if (strcmp(sched_type, "sched/wiki2") == 0)
+			wiki2_sched = true;
+		xfree(sched_type);
+		wiki2_sched_test = true;
+	}
 
 	job_ptr = find_job_record(job_id);
 	if (job_ptr == NULL) {
@@ -1589,10 +1657,10 @@ extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
 		      uid);
 		return ESLURM_ACCESS_DENIED;
 	}
-	if ((!super_user) && job_ptr->part_ptr
-	    &&  (job_ptr->part_ptr->root_only)) {
-		info("Attempt to cancel job in RootOnly partition from uid %d",
-		     uid);
+	if ((!super_user) && (signal == SIGKILL) && job_ptr->part_ptr &&
+	    (job_ptr->part_ptr->root_only) && wiki2_sched) {
+		info("Attempt to cancel Moab job using Slurm command from "
+		     "uid %d", uid);
 		return ESLURM_ACCESS_DENIED;
 	}
 
@@ -1621,6 +1689,7 @@ extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
 		job_ptr->end_time       = job_ptr->suspend_time;
 		job_ptr->tot_sus_time  += difftime(now, job_ptr->suspend_time);
 		job_ptr->job_state      = JOB_CANCELLED | JOB_COMPLETING;
+		jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
 		deallocate_nodes(job_ptr, false, true);
 		job_completion_logger(job_ptr);
 		verbose("job_signal %u of suspended job %u successful",
@@ -1725,6 +1794,13 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 	if (job_ptr->job_state == JOB_RUNNING)
 		job_comp_flag = JOB_COMPLETING;
 	if (job_ptr->job_state == JOB_SUSPENDED) {
+		enum job_states suspend_job_state = job_ptr->job_state;
+		/* we can't have it as suspended when we call the
+		 * accounting stuff.
+		 */
+		job_ptr->job_state = JOB_CANCELLED;
+		jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
+		job_ptr->job_state = suspend_job_state;
 		job_comp_flag = JOB_COMPLETING;
 		suspended = true;
 	}
@@ -1806,7 +1882,6 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	enum job_state_reason fail_reason;
 	struct part_record *part_ptr;
 	bitstr_t *req_bitmap = NULL, *exc_bitmap = NULL;
-	bool super_user = false;
 	struct job_record *job_ptr;
 	uint32_t total_nodes, max_procs;
 	acct_association_rec_t assoc_rec, *assoc_ptr;
@@ -1820,6 +1895,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	uint16_t conn_type;
 #endif
 
+	*job_pptr = (struct job_record *) NULL;
 	/* find selected partition */
 	if (job_desc->partition) {
 		part_ptr = list_find_first(part_list, &list_find_part,
@@ -1838,15 +1914,29 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		}
 		part_ptr = default_part_loc;
 	}
+
 	if (job_desc->min_nodes == NO_VAL)
 		job_desc->min_nodes = part_ptr->min_nodes_orig;
+	else if ((job_desc->min_nodes > part_ptr->max_nodes_orig) &&
+		 slurmctld_conf.enforce_part_limits) {
+		info("_job_create: job's min nodes greater than partition's "
+		     "max nodes (%u > %u)", 
+		     job_desc->min_nodes, part_ptr->max_nodes_orig);
+		error_code = ESLURM_TOO_MANY_REQUESTED_NODES;
+		return error_code;
+	} else if ((job_desc->min_nodes < part_ptr->min_nodes_orig) &&
+		   ((job_desc->max_nodes == NO_VAL) ||
+		    (job_desc->max_nodes >= part_ptr->min_nodes_orig)))
+		job_desc->min_nodes = part_ptr->min_nodes_orig;
+
 	if (job_desc->max_nodes == NO_VAL) {
 #ifdef HAVE_BG
 		job_desc->max_nodes = part_ptr->min_nodes_orig;
 #else
 		;
 #endif
-	} else if (job_desc->max_nodes < part_ptr->min_nodes_orig) {
+	} else if ((job_desc->max_nodes < part_ptr->min_nodes_orig) &&
+		   slurmctld_conf.enforce_part_limits) {
 		info("_job_create: job's max nodes less than partition's "
 		     "min nodes (%u < %u)", 
 		     job_desc->max_nodes, part_ptr->min_nodes_orig);
@@ -1854,6 +1944,16 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		return error_code;
 	}
 
+	if ((job_desc->time_limit != NO_VAL) &&
+	    (job_desc->time_limit > part_ptr->max_time) &&
+	    slurmctld_conf.enforce_part_limits) {
+		info("_job_create: job's time greater than partition's "
+		     "(%u > %u)", 
+		     job_desc->time_limit, part_ptr->max_time);
+		error_code = ESLURM_INVALID_TIME_LIMIT;
+		return error_code;
+	}
+
 	debug3("before alteration asking for nodes %u-%u procs %u", 
 	       job_desc->min_nodes, job_desc->max_nodes,
 	       job_desc->num_procs);
@@ -1864,7 +1964,6 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	       job_desc->min_nodes, job_desc->max_nodes,
 	       job_desc->num_procs, max_procs);
 	
-	*job_pptr = (struct job_record *) NULL;
 	if ((error_code = _validate_job_desc(job_desc, allocate, submit_uid)))
 		return error_code;
  
@@ -2015,7 +2114,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		job_desc->max_nodes = 0;
 	if ((part_ptr->state_up)
 	    &&  (job_desc->num_procs > part_ptr->total_cpus)) {
-		info("Job requested too many cpus (%d) of partition %s(%d)", 
+		info("Job requested too many cpus (%u) of partition %s(%u)", 
 		     job_desc->num_procs, part_ptr->name, 
 		     part_ptr->total_cpus);
 		error_code = ESLURM_TOO_MANY_REQUESTED_CPUS;
@@ -2025,7 +2124,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	select_g_alter_node_cnt(SELECT_APPLY_NODE_MIN_OFFSET,
 				&total_nodes);
 	if ((part_ptr->state_up) &&  (job_desc->min_nodes > total_nodes)) {
-		info("Job requested too many nodes (%d) of partition %s(%d)", 
+		info("Job requested too many nodes (%u) of partition %s(%u)", 
 		     job_desc->min_nodes, part_ptr->name, 
 		     part_ptr->total_nodes);
 		error_code = ESLURM_TOO_MANY_REQUESTED_NODES;
@@ -2089,20 +2188,15 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	 * otherwise leave job queued and provide warning code */
 	detail_ptr = job_ptr->details;
 	fail_reason= WAIT_NO_REASON;
-	if ((job_desc->user_id == 0) ||
-	    (job_desc->user_id == slurmctld_conf.slurm_user_id))
-		super_user = true;
-	if ((!super_user) && 
-	    (job_desc->min_nodes > part_ptr->max_nodes)) {
-		info("Job %u requested too many nodes (%d) of "
-		     "partition %s(%d)", 
+	if (job_desc->min_nodes > part_ptr->max_nodes) {
+		info("Job %u requested too many nodes (%u) of "
+		     "partition %s(%u)", 
 		     job_ptr->job_id, job_desc->min_nodes, 
 		     part_ptr->name, part_ptr->max_nodes);
 		fail_reason = WAIT_PART_NODE_LIMIT;
-	} else if ((!super_user) &&
-	           (job_desc->max_nodes != 0) &&    /* no max_nodes for job */
+	} else if ((job_desc->max_nodes != 0) &&    /* no max_nodes for job */
 		   (job_desc->max_nodes < part_ptr->min_nodes)) {
-		info("Job %u requested too few nodes (%d) of partition %s(%d)",
+		info("Job %u requested too few nodes (%u) of partition %s(%u)",
 		     job_ptr->job_id, job_desc->max_nodes, 
 		     part_ptr->name, part_ptr->min_nodes);
 		fail_reason = WAIT_PART_NODE_LIMIT;
@@ -2110,6 +2204,10 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		info("Job %u requested down partition %s", 
 		     job_ptr->job_id, part_ptr->name);
 		fail_reason = WAIT_PART_STATE;
+	} else if ((job_ptr->time_limit != NO_VAL) &&
+		   (job_ptr->time_limit > part_ptr->max_time)) {
+		info("Job %u exceeds partition time limit", job_ptr->job_id);
+		fail_reason = WAIT_PART_TIME_LIMIT;
 	}
 	if (fail_reason != WAIT_NO_REASON) {
 		error_code = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
@@ -3378,6 +3476,8 @@ void reset_job_bitmaps(void)
 					JOB_COMPLETING;
 				job_ptr->tot_sus_time += 
 					difftime(now, job_ptr->suspend_time);
+				jobacct_storage_g_job_suspend(acct_db_conn, 
+							      job_ptr);
 			}
 			job_ptr->exit_code = MAX(job_ptr->exit_code, 1);
 			job_ptr->state_reason = FAIL_DOWN_NODE;
@@ -3458,8 +3558,7 @@ static void _reset_step_bitmaps(struct job_record *job_ptr)
  * NOTE: READ lock_slurmctld config before entry */
 void reset_first_job_id(void)
 {
-	if (job_id_sequence < slurmctld_conf.first_job_id)
-		job_id_sequence = slurmctld_conf.first_job_id;
+	job_id_sequence = MAX(job_id_sequence, slurmctld_conf.first_job_id);
 }
 
 /*
@@ -3470,8 +3569,7 @@ extern uint32_t get_next_job_id(void)
 {
 	uint32_t next_id;
 
-	if (job_id_sequence == 0)
-		job_id_sequence = slurmctld_conf.first_job_id;
+	job_id_sequence = MAX(job_id_sequence, slurmctld_conf.first_job_id);
 	next_id = job_id_sequence + 1;
 	if (next_id >= MIN_NOALLOC_JOBID)
 		next_id = slurmctld_conf.first_job_id;
@@ -3486,8 +3584,7 @@ static void _set_job_id(struct job_record *job_ptr)
 {
 	uint32_t new_id;
 
-	if (job_id_sequence == 0)
-		job_id_sequence = slurmctld_conf.first_job_id;
+	job_id_sequence = MAX(job_id_sequence, slurmctld_conf.first_job_id);
 
 	xassert(job_ptr);
 	xassert (job_ptr->magic == JOB_MAGIC);
@@ -3969,7 +4066,8 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 						    accounting_enforce, 
 						    &assoc_ptr)) {
-				info("job_update: invalid account %s for job %u",
+				info("job_update: invalid account %s "
+				     "for job %u",
 				     job_specs->account, job_ptr->job_id);
 				error_code = ESLURM_INVALID_ACCOUNT;
 				/* Let update proceed. Note there is an invalid
@@ -4766,6 +4864,8 @@ extern void job_completion_logger(struct job_record  *job_ptr)
 	} else {	/* JOB_FAILED, JOB_NODE_FAIL, or JOB_TIMEOUT */
 		if (job_ptr->mail_type & MAIL_JOB_FAIL)
 			mail_job_info(job_ptr, MAIL_JOB_FAIL);
+		else if (job_ptr->mail_type & MAIL_JOB_END)
+			mail_job_info(job_ptr, MAIL_JOB_END);
 	}
 
 	g_slurm_jobcomp_write(job_ptr);
@@ -5219,8 +5319,17 @@ extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd conn_fd)
 		goto reply;
 	}
 
-	if (job_ptr->job_state == JOB_SUSPENDED)
+	if (job_ptr->job_state == JOB_SUSPENDED) {
+		enum job_states suspend_job_state = job_ptr->job_state;
+		/* we can't have it as suspended when we call the
+		 * accounting stuff.
+		 */
+		job_ptr->job_state = JOB_CANCELLED;
+		jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
+		job_ptr->job_state = suspend_job_state;
 		suspended = true;
+	}
+
 	job_ptr->time_last_active  = now;
 	if (suspended)
 		job_ptr->end_time = job_ptr->suspend_time;
@@ -5342,3 +5451,32 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 
 	return true;
 }
+
+/*
+ * job_cancel_by_assoc_id - Cancel all pending and running jobs with a given
+ *	association ID. This happens when an association is deleted (e.g. when
+ *	a user is removed from the association database).
+ * RET count of cancelled jobs
+ */
+extern int job_cancel_by_assoc_id(uint32_t assoc_id)
+{
+	int cnt = 0;
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+
+	if (!job_list)
+		return cnt;
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if ((job_ptr->assoc_id != assoc_id) || 
+		    IS_JOB_FINISHED(job_ptr))
+			continue;
+		info("Association deleted, cancelling job %u", job_ptr->job_id);
+		job_signal(job_ptr->job_id, SIGKILL, 0, 0);
+		job_ptr->state_reason = FAIL_BANK_ACCOUNT;
+		cnt++;
+	}
+	list_iterator_destroy(job_iterator);
+	return cnt;
+}
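
Among the job_mgr.c changes above, the new EnforcePartLimits checks reject a submission outright when the request exceeds the partition's node or time limits; without that option the job is left queued with an appropriate wait reason and re-checked at schedule time. A rough standalone illustration of the submit-time check follows; the struct fields are simplified stand-ins for job_desc_msg_t and part_record, not the real types.

/* Illustrative submit-time partition limit check, loosely following the
 * _job_create() logic above; these structs are simplified stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NO_VAL 0xfffffffe

struct part  { uint32_t min_nodes, max_nodes, max_time; };
struct jobrq { uint32_t min_nodes, max_nodes, time_limit; };

enum { OK, TOO_MANY_NODES, TOO_FEW_NODES, BAD_TIME_LIMIT };

static int check_limits(const struct jobrq *j, const struct part *p,
			bool enforce_part_limits)
{
	if (!enforce_part_limits)
		return OK;	/* defer to the schedule-time checks */
	if (j->min_nodes > p->max_nodes)
		return TOO_MANY_NODES;
	if ((j->max_nodes != NO_VAL) && (j->max_nodes < p->min_nodes))
		return TOO_FEW_NODES;
	if ((j->time_limit != NO_VAL) && (j->time_limit > p->max_time))
		return BAD_TIME_LIMIT;
	return OK;
}

int main(void)
{
	struct part  p = { .min_nodes = 2, .max_nodes = 64, .max_time = 120 };
	struct jobrq j = { .min_nodes = 128, .max_nodes = NO_VAL,
			   .time_limit = 60 };

	if (check_limits(&j, &p, true) != OK)
		printf("submission rejected: exceeds partition limits\n");
	else
		printf("submission accepted or deferred\n");
	return 0;
}
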
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 0d496b23e2b8f7825b117ed6f30c4e4d9fc44db6..3274da8fb252472139f93ed5614290efe681b8aa 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -54,6 +54,7 @@
 #include "src/common/xassert.h"
 #include "src/common/xstring.h"
 
+#include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
@@ -283,6 +284,10 @@ extern int schedule(void)
 		job_ptr = job_queue[i].job_ptr;
 		if (job_ptr->priority == 0)	/* held */
 			continue;
+		if (!acct_policy_job_runnable(job_ptr)) {
+			job_ptr->state_reason = WAIT_ASSOC_LIMIT;
+			continue;
+		}
 		if (_failed_partition(job_ptr->part_ptr, failed_parts, 
 				      failed_part_cnt)) {
 			job_ptr->state_reason = WAIT_PRIORITY;
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index 7772adb0a35feb33c29d3bc9584ec64f984825a1..1ed44a1cd74dca17ab21e9d6dc84b282e47e8331 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -4,7 +4,7 @@
  *	hash table (node_hash_table), time stamp (last_node_update) and 
  *	configuration list (config_list)
  *
- *  $Id: node_mgr.c 14124 2008-05-23 21:12:21Z da $
+ *  $Id: node_mgr.c 14293 2008-06-19 19:27:39Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -70,6 +70,7 @@
 #include "src/slurmctld/sched_plugin.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/trigger_mgr.h"
+#include "src/plugins/select/bluegene/plugin/bg_boot_time.h"
 
 #define _DEBUG		0
 #define MAX_RETRIES	10
@@ -1720,7 +1721,9 @@ extern int validate_nodes_via_front_end(
 		     * completes which waits for bgblock boot to complete.  
 		     * This can take several minutes on BlueGene. */
 		if (difftime(now, job_ptr->time_last_active) <= 
-				(1400 + 5 * job_ptr->node_cnt))
+
+		    (BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
+		     BG_INCR_BLOCK_BOOT * job_ptr->node_cnt))
 			continue;
 #else
 		if (difftime(now, job_ptr->time_last_active) <= 5)
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 2798ba45ecfed288d005fe274f874c05c87dfa97..1c63015a297b3c4ba625e1d8568c84aa7198faee 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -58,11 +58,12 @@
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/node_select.h"
+#include "src/common/slurm_accounting_storage.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_accounting_storage.h"
 
+#include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/node_scheduler.h"
@@ -110,7 +111,8 @@ static bitstr_t *_valid_features(struct job_details *detail_ptr,
 
 /*
  * allocate_nodes - change state of specified nodes to NODE_STATE_ALLOCATED
- *	also claim required licenses
+ *	also claim required licenses and resources reserved by accounting
+ *	policy association
  * IN job_ptr - job being allocated resources
  */
 extern void allocate_nodes(struct job_record *job_ptr)
@@ -125,14 +127,15 @@ extern void allocate_nodes(struct job_record *job_ptr)
 	}
 
 	license_job_get(job_ptr);
+	acct_policy_job_begin(job_ptr);
 	return;
 }
 
 
 /*
  * deallocate_nodes - for a given job, deallocate its nodes and make 
- *	their state NODE_STATE_COMPLETING
- *	also release the job's licenses
+ *	their state NODE_STATE_COMPLETING; also release the job's licenses
+ *	and resources reserved by accounting policy association
  * IN job_ptr - pointer to terminating job (already in some COMPLETING state)
  * IN timeout - true if job exhausted time limit, send REQUEST_KILL_TIMELIMIT
  *	RPC instead of REQUEST_TERMINATE_JOB
@@ -152,6 +155,7 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
 	xassert(job_ptr->details);
 
 	license_job_return(job_ptr);
+	acct_policy_job_fini(job_ptr);
 	if (slurm_sched_freealloc(job_ptr) != SLURM_SUCCESS)
 		error("slurm_sched_freealloc(%u): %m", job_ptr->job_id);
 	if (select_g_job_fini(job_ptr) != SLURM_SUCCESS)
@@ -659,9 +663,10 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 			avail_nodes = bit_set_count(avail_bitmap);
 			tried_sched = false;	/* need to test these nodes */
 
-			if (shared) {
+			if (shared && ((i+1) < node_set_size) && 
+			    (node_set_ptr[i].weight == node_set_ptr[i+1].weight)) {
 				/* Keep accumulating so we can pick the
-				 * most lighly loaded nodes */
+				 * most lightly loaded nodes */
 				continue;
 			}
 
@@ -795,7 +800,6 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 	return error_code;
 }
 
-
 /*
  * select_nodes - select and allocate nodes to a specific job
  * IN job_ptr - pointer to the job record
@@ -824,15 +828,17 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	struct node_set *node_set_ptr = NULL;
 	struct part_record *part_ptr = job_ptr->part_ptr;
 	uint32_t min_nodes, max_nodes, req_nodes;
-	int super_user = false;
 	enum job_state_reason fail_reason;
 	time_t now = time(NULL);
 
 	xassert(job_ptr);
 	xassert(job_ptr->magic == JOB_MAGIC);
 
-	if ((job_ptr->user_id == 0) || (job_ptr->user_id == getuid()))
-		super_user = true;
+	if (!acct_policy_job_runnable(job_ptr)) {
+		job_ptr->state_reason = WAIT_ASSOC_LIMIT;
+		last_job_update = now;
+		return ESLURM_ACCOUNTING_POLICY;
+	}
 
 	/* identify partition */
 	if (part_ptr == NULL) {
@@ -849,8 +855,6 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		fail_reason = WAIT_PART_STATE;
 	else if (job_ptr->priority == 0)	/* user or administrator hold */
 		fail_reason = WAIT_HELD;
-	else if (super_user)
-		;	/* ignore any time or node count limits */
 	else if ((job_ptr->time_limit != NO_VAL) &&
 		 (job_ptr->time_limit > part_ptr->max_time))
 		fail_reason = WAIT_PART_TIME_LIMIT;
@@ -886,19 +890,10 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	/* enforce both user's and partition's node limits */
 	/* info("req: %u-%u, %u", job_ptr->details->min_nodes,
 	   job_ptr->details->max_nodes, part_ptr->max_nodes); */
-	if (super_user) {
-		min_nodes = job_ptr->details->min_nodes;
-	} else {
-		min_nodes = MAX(job_ptr->details->min_nodes, 
-				part_ptr->min_nodes);
-	}
-	if (job_ptr->details->max_nodes == 0) {
-		if (super_user)
-			max_nodes = INFINITE;
-		else
-			max_nodes = part_ptr->max_nodes;
-	} else if (super_user)
-		max_nodes = job_ptr->details->max_nodes;
+
+	min_nodes = MAX(job_ptr->details->min_nodes, part_ptr->min_nodes);
+	if (job_ptr->details->max_nodes == 0)
+		max_nodes = part_ptr->max_nodes;
 	else
 		max_nodes = MIN(job_ptr->details->max_nodes, 
 				part_ptr->max_nodes);
diff --git a/src/slurmctld/node_scheduler.h b/src/slurmctld/node_scheduler.h
index 42a4bf7dfc18419ab758c73840b3561e875778d2..a59fa5b1e1e9a3a5bc6c3b93b8cb5a18306763d6 100644
--- a/src/slurmctld/node_scheduler.h
+++ b/src/slurmctld/node_scheduler.h
@@ -98,4 +98,4 @@ extern void re_kill_job(struct job_record *job_ptr);
 extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		bitstr_t **select_node_bitmap);
 
-#endif /* !_HAVE_NODE_SCHEDULER_H*/
+#endif /* !_HAVE_NODE_SCHEDULER_H */
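
With the root/SlurmUser bypass removed, select_nodes() now clamps every job's node range into the partition limits uniformly: the effective minimum is MAX(job, partition) and the effective maximum is MIN(job, partition), with a missing job maximum falling back to the partition maximum. A tiny worked example of that clamping, using stand-in values only:

/* Worked example of the node-range clamping now applied uniformly in
 * select_nodes(); the values are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))

int main(void)
{
	uint32_t part_min = 2,  part_max = 32;	/* partition limits        */
	uint32_t job_min  = 1,  job_max  = 0;	/* 0 => no maximum requested */
	uint32_t min_nodes, max_nodes;

	min_nodes = MAX(job_min, part_min);
	if (job_max == 0)
		max_nodes = part_max;
	else
		max_nodes = MIN(job_max, part_max);

	/* prints "effective node range: 2-32" */
	printf("effective node range: %u-%u\n",
	       (unsigned) min_nodes, (unsigned) max_nodes);
	return 0;
}
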
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 80aa09c035d9a65af0d54a63801bf13ac4f0eee9..ed3d9d2f8c679f485ee1f570a96d135e883ab54e 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -352,6 +352,7 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->def_mem_per_task    = conf->def_mem_per_task;
 	conf_ptr->disable_root_jobs   = conf->disable_root_jobs;
 
+	conf_ptr->enforce_part_limits = conf->enforce_part_limits;
 	conf_ptr->epilog              = xstrdup(conf->epilog);
 	conf_ptr->epilog_msg_time     = conf->epilog_msg_time;
 
@@ -2853,6 +2854,8 @@ inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg)
 			case ACCT_MODIFY_USER:
 			case ACCT_ADD_USER:
 			case ACCT_REMOVE_USER:
+			case ACCT_ADD_COORD:
+			case ACCT_REMOVE_COORD:
 				rc = assoc_mgr_update_local_users(object);
 				break;
 			case ACCT_ADD_ASSOC:
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index bf839eb66c2be017df165ddffbc3cfa31fdb9bcf..b219cda17abaa16ebc9ba236f1e8a2d260ec6ea5 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -55,6 +55,7 @@
 #include <time.h>
 #include <unistd.h>
 
+#include "src/common/assoc_mgr.h"
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
@@ -66,6 +67,7 @@
 #include "src/common/switch.h"
 #include "src/common/xstring.h"
 
+#include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
@@ -76,6 +78,7 @@
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/trigger_mgr.h"
 
+static void _acct_restore_active_jobs(void);
 static int  _build_bitmaps(void);
 static void _build_bitmaps_pre_select(void);
 static int  _init_all_slurm_conf(void);
@@ -710,7 +713,7 @@ static int _build_all_partitionline_info()
 int read_slurm_conf(int recover)
 {
 	DEF_TIMERS;
-	int error_code, i, rc;
+	int error_code, i, rc, load_job_ret = SLURM_SUCCESS;
 	int old_node_record_count;
 	struct node_record *old_node_table_ptr;
 	char *old_auth_type       = xstrdup(slurmctld_conf.authtype);
@@ -783,10 +786,10 @@ int read_slurm_conf(int recover)
 	if (recover > 1) {	/* Load node, part and job info */
 		(void) load_all_node_state(false);
 		(void) load_all_part_state();
-		(void) load_all_job_state();
+		load_job_ret = load_all_job_state();
 	} else if (recover == 1) {	/* Load job info only */
 		(void) load_all_node_state(true);
-		(void) load_all_job_state();
+		load_job_ret = load_all_job_state();
 	} else {	/* Load no info, preserve all state */
 		if (old_node_table_ptr) {
 			info("restoring original state of nodes");
@@ -816,7 +819,7 @@ int read_slurm_conf(int recover)
 	_purge_old_node_state(old_node_table_ptr, old_node_record_count);
 
 	if ((rc = _build_bitmaps()))
-		return rc;	/* fatal error */
+		fatal("_build_bitmaps failure");
 
 	license_free();
 	if (license_init(slurmctld_conf.licenses) != SLURM_SUCCESS)
@@ -848,6 +851,11 @@ int read_slurm_conf(int recover)
 					 old_select_type_p);
 	error_code = MAX(error_code, rc);	/* not fatal */
 
+	/* Restore job accounting info if file missing or corrupted,
+	 * an extremely rare situation */
+	if (load_job_ret)
+		_acct_restore_active_jobs();
+
 	slurmctld_conf.last_update = time(NULL);
 	END_TIMER2("read_slurm_conf");
 	return error_code;
@@ -1175,6 +1183,7 @@ static void _validate_node_proc_count(void)
 
 /*
  * _restore_job_dependencies - Build depend_list and license_list for every job
+ *	also reset the running job count for scheduling policy
  */
 static int _restore_job_dependencies(void)
 {
@@ -1185,8 +1194,14 @@ static int _restore_job_dependencies(void)
 	bool valid;
 	List license_list;
 
+	assoc_mgr_clear_used_info();
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (accounting_enforce &&
+		    ((job_ptr->job_state == JOB_RUNNING) ||
+		     (job_ptr->job_state == JOB_SUSPENDED)))
+			acct_policy_job_begin(job_ptr);
+
 		license_list = license_job_validate(job_ptr->licenses, &valid);
 		if (job_ptr->license_list)
 			list_destroy(job_ptr->license_list);
@@ -1211,3 +1226,35 @@ static int _restore_job_dependencies(void)
 	list_iterator_destroy(job_iterator);
 	return error_code;
 }
+
+/* Flush accounting information on this cluster, then for each running or 
+ * suspended job, restore its state in the accounting system */
+static void _acct_restore_active_jobs(void)
+{
+	struct job_record *job_ptr;
+	ListIterator job_iterator;
+	struct step_record *step_ptr;
+	ListIterator step_iterator;
+
+	info("Reinitializing job accounting state");
+	acct_storage_g_flush_jobs_on_cluster(acct_db_conn,
+					     slurmctld_cluster_name,
+					     time(NULL));
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (job_ptr->job_state == JOB_SUSPENDED)
+			jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
+		if ((job_ptr->job_state == JOB_SUSPENDED) ||
+		    (job_ptr->job_state == JOB_RUNNING)) {
+			jobacct_storage_g_job_start(acct_db_conn, job_ptr);
+			step_iterator = list_iterator_create(job_ptr->step_list);
+			while ((step_ptr = (struct step_record *) 
+					   list_next(step_iterator))) {
+				jobacct_storage_g_step_start(acct_db_conn, 
+							     step_ptr);
+			}
+			list_iterator_destroy (step_iterator);
+		}
+	}
+	list_iterator_destroy(job_iterator);
+}
diff --git a/src/slurmctld/sched_plugin.c b/src/slurmctld/sched_plugin.c
index fea958bde17635fb2f314df4dfbcc7b9edd02055..187143b3688093af0db031dca7049b2d94a0cdc8 100644
--- a/src/slurmctld/sched_plugin.c
+++ b/src/slurmctld/sched_plugin.c
@@ -106,6 +106,16 @@ slurm_sched_get_ops( slurm_sched_context_t *c )
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
+	/* Find the correct plugin directly by name. */
+	c->cur_plugin = plugin_load_and_link(c->sched_type, n_syms, syms,
+					     (void **) &c->ops);
+	if ( c->cur_plugin != PLUGIN_INVALID_HANDLE )
+		return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s; "
+	      "scanning all plugin files",
+	      c->sched_type);
+
 	/* Get plugin list. */
 	if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
@@ -179,6 +189,8 @@ slurm_sched_context_destroy( slurm_sched_context_t *c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->sched_type );
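
The sched_plugin.c change above (mirrored in proctrack.c and task_plugin.c below) first attempts to load the requested plugin directly by name and only falls back to scanning the plugin directory when that fails, which is cheaper on the common path; the destroy path then unloads a directly linked plugin. The sketch below shows the same try-the-named-object-first idea with plain dlopen(); the directory, file name, and symbol are hypothetical, and this is not the SLURM plugin API itself (build with -ldl).

/* Hedged sketch of direct-load-then-scan plugin lookup using plain
 * dlopen(); the path, file name, and symbol below are hypothetical. */
#include <dirent.h>
#include <dlfcn.h>
#include <stdio.h>
#include <string.h>

static void *try_load(const char *path, const char *sym)
{
	void *h = dlopen(path, RTLD_LAZY);
	if (h && dlsym(h, sym))
		return h;	/* object provides the required symbol */
	if (h)
		dlclose(h);
	return NULL;
}

int main(void)
{
	const char *dir = "/usr/lib/slurm";	/* hypothetical plugin dir */
	const char *sym = "plugin_name";	/* hypothetical symbol     */
	char path[4096];
	void *h;

	/* 1. Try the expected file name directly (no directory scan). */
	snprintf(path, sizeof(path), "%s/sched_backfill.so", dir);
	if ((h = try_load(path, sym))) {
		printf("loaded %s directly\n", path);
		dlclose(h);
		return 0;
	}

	/* 2. Fall back to scanning every file in the plugin directory. */
	DIR *d = opendir(dir);
	if (!d)
		return 1;
	struct dirent *e;
	while ((e = readdir(d))) {
		if (!strstr(e->d_name, ".so"))
			continue;
		snprintf(path, sizeof(path), "%s/%s", dir, e->d_name);
		if ((h = try_load(path, sym))) {
			printf("found plugin by scanning: %s\n", path);
			dlclose(h);
			break;
		}
	}
	closedir(d);
	return 0;
}
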
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 6ff3e1ee44dbebf9d50fec6e9a4a9076242cfa06..cd16cf877239fa8367fbf38d80563bc00d31e20a 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -794,6 +794,14 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 		int will_run, will_run_response_msg_t **resp, 
 		int allocate, uid_t submit_uid, struct job_record **job_pptr);
 
+/*
+ * job_cancel_by_assoc_id - Cancel all pending and running jobs with a given
+ *	association ID. This happens when an association is deleted (e.g. when
+ *	a user is removed from the association database).
+ * RET count of cancelled jobs
+ */
+extern int job_cancel_by_assoc_id(uint32_t assoc_id);
+
 /* log the completion of the specified job */
 extern void job_completion_logger(struct job_record  *job_ptr);
 
diff --git a/src/slurmctld/trigger_mgr.c b/src/slurmctld/trigger_mgr.c
index c1686129ac554e9f165bcb51aebef514a132d834..1e38a598619b71877f6c8c4d3dd13a1f1841d5c0 100644
--- a/src/slurmctld/trigger_mgr.c
+++ b/src/slurmctld/trigger_mgr.c
@@ -993,12 +993,18 @@ static void _trigger_run_program(trig_mgr_info_t *trig_in)
 
 static void _clear_event_triggers(void)
 {
-	if (trigger_down_nodes_bitmap)
-		bit_nclear(trigger_down_nodes_bitmap, 0, (node_record_count-1));
-	if (trigger_drained_nodes_bitmap)
-		bit_nclear(trigger_drained_nodes_bitmap, 0, (node_record_count-1));
-	if (trigger_up_nodes_bitmap)
-		bit_nclear(trigger_up_nodes_bitmap,   0, (node_record_count-1));
+	if (trigger_down_nodes_bitmap) {
+		bit_nclear(trigger_down_nodes_bitmap, 
+			   0, (bit_size(trigger_down_nodes_bitmap) - 1));
+	}
+	if (trigger_drained_nodes_bitmap) {
+		bit_nclear(trigger_drained_nodes_bitmap,
+			   0, (bit_size(trigger_drained_nodes_bitmap) - 1));
+	}
+	if (trigger_up_nodes_bitmap) {
+		bit_nclear(trigger_up_nodes_bitmap,   
+			   0, (bit_size(trigger_up_nodes_bitmap) - 1));
+	}
 	trigger_node_reconfig = false;
 	trigger_block_err = false;
 }
diff --git a/src/slurmd/common/proctrack.c b/src/slurmd/common/proctrack.c
index 651af3b859f3a16eb8cecc41285ee14fe3515431..4e3d33003ec9070f2c124cd57c54b521ea0d9566 100644
--- a/src/slurmd/common/proctrack.c
+++ b/src/slurmd/common/proctrack.c
@@ -96,6 +96,16 @@ _proctrack_get_ops( slurm_proctrack_context_t *c )
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
+	/* Find the correct plugin directly by name. */
+	c->cur_plugin = plugin_load_and_link(c->proctrack_type, n_syms, syms,
+					     (void **) &c->ops);
+	if ( c->cur_plugin != PLUGIN_INVALID_HANDLE )
+		return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s; "
+	      "scanning all plugin files",
+	      c->proctrack_type);
+
 	/* Get plugin list. */
 	if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
@@ -171,6 +181,8 @@ _proctrack_context_destroy( slurm_proctrack_context_t *c )
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->proctrack_type );
diff --git a/src/slurmd/common/task_plugin.c b/src/slurmd/common/task_plugin.c
index 1882a9b0555a1044a77eff929214fbcdb7330bc2..3841edb68e3aa66b3d850546cc98862a59077ce3 100644
--- a/src/slurmd/common/task_plugin.c
+++ b/src/slurmd/common/task_plugin.c
@@ -92,6 +92,16 @@ _slurmd_task_get_ops(slurmd_task_context_t *c)
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
+	/* Find the correct plugin directly by name. */
+	c->cur_plugin = plugin_load_and_link(c->task_type, n_syms, syms,
+					     (void **) &c->ops);
+	if ( c->cur_plugin != PLUGIN_INVALID_HANDLE )
+		return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s; "
+	      "scanning all plugin files",
+	      c->task_type);
+
 	/* Get plugin list. */
 	if ( c->plugin_list == NULL ) {
 		char *plugin_dir;
@@ -156,6 +166,8 @@ _slurmd_task_context_destroy(slurmd_task_context_t *c)
 		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
 			return SLURM_ERROR;
 		}
+	} else {
+		plugin_unload(c->cur_plugin);
 	}
 
 	xfree( c->task_type );
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index bab35a4a6df7f9454b4694f9e739260ebb9d58b5..86e2523843b183b7af5555291f9fd6c5657ee8ed 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -55,6 +55,7 @@
 #include <utime.h>
 #include <grp.h>
 
+#include "src/common/env.h"
 #include "src/common/hostlist.h"
 #include "src/common/jobacct_common.h"
 #include "src/common/log.h"
@@ -318,7 +319,9 @@ _send_slurmstepd_init(int fd, slurmd_step_type_t type, void *req,
 	Buf buffer = NULL;
 	slurm_msg_t msg;
 	uid_t uid = (uid_t)-1;
-	struct passwd *pw = NULL;
+	struct passwd pwd, *pwd_ptr;
+	char *pwd_buf;
+	size_t buf_size;
 	gids_t *gids = NULL;
 
 	int rank;
@@ -448,16 +451,19 @@ _send_slurmstepd_init(int fd, slurmd_step_type_t type, void *req,
 	free_buf(buffer);
 	
 	/* send cached group ids array for the relevant uid */
-	debug3("_send_slurmstepd_init: call to getpwuid");
-	if (!(pw = getpwuid(uid))) {
-		error("_send_slurmstepd_init getpwuid: %m");
+	debug3("_send_slurmstepd_init: call to getpwuid_r");
+	buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
+	pwd_buf = xmalloc(buf_size);
+	if (getpwuid_r(uid, &pwd, pwd_buf, buf_size, &pwd_ptr)) {
+		xfree(pwd_buf);
+		error("_send_slurmstepd_init getpwuid_r: %m");
 		len = 0;
 		safe_write(fd, &len, sizeof(int));
 		return -1;
 	}
-	debug3("_send_slurmstepd_init: return from getpwuid");
+	debug3("_send_slurmstepd_init: return from getpwuid_r");
 
-	if ((gids = _gids_cache_lookup(pw->pw_name, pw->pw_gid))) {
+	if ((gids = _gids_cache_lookup(pwd.pw_name, pwd.pw_gid))) {
 		int i;
 		uint32_t tmp32;
 		safe_write(fd, &gids->ngids, sizeof(int));
@@ -469,6 +475,7 @@ _send_slurmstepd_init(int fd, slurmd_step_type_t type, void *req,
 		len = 0;
 		safe_write(fd, &len, sizeof(int));
 	}
+	xfree(pwd_buf);
 	return 0;
 
 rwfail:
@@ -870,6 +877,49 @@ _prolog_error(batch_job_launch_msg_t *req, int rc)
 	close(fd);
 }
 
+/* load the user's environment on this machine if requested
+ * SLURM_GET_USER_ENV environment variable is set */
+static void
+_get_user_env(batch_job_launch_msg_t *req)
+{
+	struct passwd pwd, *pwd_ptr;
+	char *pwd_buf = NULL;
+	char **new_env;
+	size_t buf_size;
+	int i;
+
+	for (i=0; i<req->envc; i++) {
+		if (strcmp(req->environment[i], "SLURM_GET_USER_ENV=1") == 0)
+			break;
+	}
+	if (i >= req->envc)
+		return;		/* don't need to load env */
+
+	buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
+	pwd_buf = xmalloc(buf_size);
+	if (getpwuid_r(req->uid, &pwd, pwd_buf, buf_size, &pwd_ptr)) {
+		error("getpwuid_r(%u):%m", req->uid);
+	} else {
+		verbose("get env for user %s here", pwd.pw_name);
+		/* Permit up to 120 second delay before using cache file */
+		new_env = env_array_user_default(pwd.pw_name, 120, 0);
+		if (new_env) {
+			env_array_merge(&new_env, 
+					(const char **) req->environment);
+			env_array_free(req->environment);
+			req->environment = new_env;
+			req->envc = envcount(new_env);
+		} else {
+			/* One option is to kill the job, but it's 
+			 * probably better to try running with what 
+			 * we have. */
+			error("Unable to get user's local environment, "
+			      "running only with passed environment");
+		}
+	}
+	xfree(pwd_buf);
+}
+
 static void
 _rpc_batch_job(slurm_msg_t *msg)
 {
@@ -911,13 +961,13 @@ _rpc_batch_job(slurm_msg_t *msg)
 				     SELECT_DATA_BLOCK_ID, 
 				     &bg_part_id);
 
-#ifdef HAVE_BG
 		/* BlueGene prolog waits for partition boot and is very slow.
+		 * On any system we might need to load environment variables
+		 * for Moab (see --get-user-env), which could also be slow.
 		 * Just reply now and send a separate kill job request if the 
 		 * prolog or launch fail. */
 		slurm_send_rc_msg(msg, rc);
 		replied = true;
-#endif
 
 		rc = _run_prolog(req->job_id, req->uid, bg_part_id);
 		xfree(bg_part_id);
@@ -948,6 +998,7 @@ _rpc_batch_job(slurm_msg_t *msg)
 		rc = ESLURMD_CREDENTIAL_REVOKED;     /* job already ran */
 		goto done;
 	}
+	_get_user_env(req);
 
 	slurm_mutex_lock(&launch_mutex);
 	if (req->step_id == SLURM_BATCH_SCRIPT)
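
The req.c changes above replace getpwuid() with the reentrant getpwuid_r(), sizing a scratch buffer from sysconf(_SC_GETPW_R_SIZE_MAX) so password lookups are safe inside the threaded slurmd. A minimal standalone version of that pattern follows; the fallback for a -1 return from sysconf is an extra precaution added here, not something taken from the patch.

/* Minimal standalone example of the thread-safe getpwuid_r() pattern
 * used in _send_slurmstepd_init() and _get_user_env() above. */
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct passwd pwd, *pwd_ptr = NULL;
	long buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);

	if (buf_size < 0)
		buf_size = 16384;	/* no limit reported; pick a default */

	char *buf = malloc(buf_size);
	if (!buf)
		return 1;

	/* getpwuid_r() returns 0 on success and sets pwd_ptr to NULL when
	 * the uid is not found. */
	if (getpwuid_r(getuid(), &pwd, buf, (size_t) buf_size, &pwd_ptr) ||
	    (pwd_ptr == NULL)) {
		perror("getpwuid_r");
		free(buf);
		return 1;
	}

	printf("user %s, primary gid %u\n",
	       pwd.pw_name, (unsigned) pwd.pw_gid);
	free(buf);
	return 0;
}
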
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c
index d46d3fc08af6bfb6fc25446b8c96272907108728..08d94f8cc7a0c01aac16ba05f6f58e659bd74394 100644
--- a/src/slurmd/slurmd/slurmd.c
+++ b/src/slurmd/slurmd/slurmd.c
@@ -1,8 +1,10 @@
 /*****************************************************************************\
  *  src/slurmd/slurmd/slurmd.c - main slurm node server daemon
- *  $Id: slurmd.c 13690 2008-03-21 18:17:38Z jette $
+ *  $Id: slurmd.c 14314 2008-06-23 20:57:56Z jette $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
  *  LLNL-CODE-402394.
@@ -582,6 +584,11 @@ _read_config()
 	/* node_name may already be set from a command line parameter */
 	if (conf->node_name == NULL)
 		conf->node_name = slurm_conf_get_nodename(conf->hostname);
+	/* if we didn't match the form of the hostname already
+	 * stored in conf->hostname, check to see if we match any
+	 * valid aliases */
+	if (conf->node_name == NULL)
+		conf->node_name = slurm_conf_get_aliased_nodename();
 	if (conf->node_name == NULL)
 		conf->node_name = slurm_conf_get_nodename("localhost");
 	if (conf->node_name == NULL)
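The _read_config() change above extends the NodeName resolution chain: exact hostname first, then any valid host alias, then "localhost". A hedged stand-alone sketch of that fallback order follows; the function-pointer parameters stand in for slurm_conf_get_nodename() and slurm_conf_get_aliased_nodename(), which are not reproduced here:

#include <stddef.h>

static char *resolve_node_name(const char *hostname,
			       char *(*by_name)(const char *name),
			       char *(*by_alias)(void))
{
	char *node_name = by_name(hostname);	  /* exact hostname match */

	if (node_name == NULL)
		node_name = by_alias();		  /* any valid host alias */
	if (node_name == NULL)
		node_name = by_name("localhost"); /* last-resort lookup   */
	return node_name;			  /* may still be NULL    */
}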
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index c468c7bf54982251b998707f5df987440b46a6c6..62862620cf54d013449a41074ca1faa593a8dce4 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -1,8 +1,9 @@
 /*****************************************************************************\
  *  src/slurmd/slurmstepd/mgr.c - job manager functions for slurmstepd
- *  $Id: mgr.c 13971 2008-05-02 20:23:00Z jette $
+ *  $Id: mgr.c 14238 2008-06-11 21:54:28Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
  *  LLNL-CODE-402394.
@@ -669,10 +670,13 @@ job_manager(slurmd_job_t *job)
 	 */
 	if (switch_init() != SLURM_SUCCESS
 	    || slurmd_task_init() != SLURM_SUCCESS
-	    || mpi_hook_slurmstepd_init(&job->env) != SLURM_SUCCESS
 	    || slurm_proctrack_init() != SLURM_SUCCESS
 	    || slurm_jobacct_gather_init() != SLURM_SUCCESS) {
-		rc = SLURM_FAILURE;
+		rc = SLURM_PLUGIN_NAME_INVALID;
+		goto fail1;
+	}
+	if (mpi_hook_slurmstepd_init(&job->env) != SLURM_SUCCESS) {
+		rc = SLURM_MPI_PLUGIN_NAME_INVALID;
 		goto fail1;
 	}
 	
@@ -729,7 +733,12 @@ job_manager(slurmd_job_t *job)
 	reattach_job = job;
 
 	job->state = SLURMSTEPD_STEP_RUNNING;
-
+	
+	/* If we are not polling periodically, gather task statistics once
+	 * now so that some accounting information is recorded.
+	 */
+	if(!conf->job_acct_gather_freq)
+		jobacct_gather_g_stat_task(0);
 	/* Send job launch response with list of pids */
 	_send_launch_resp(job, 0);
 
diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c
index c38df2e2c470494953e7ce69faa9a1fdd8f638d0..339ff6c4aa533266134e2ba2358073b280343bb2 100644
--- a/src/slurmd/slurmstepd/task.c
+++ b/src/slurmd/slurmstepd/task.c
@@ -1,8 +1,8 @@
 /*****************************************************************************\
  *  slurmd/slurmstepd/task.c - task launching functions for slurmstepd
- *  $Id: task.c 13672 2008-03-19 23:10:58Z jette $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark A. Grondona <mgrondona@llnl.gov>.
  *  LLNL-CODE-402394.
@@ -266,7 +266,7 @@ _build_path(char* fname, char **prog_env)
 	return file_path;
 }
 
-static void
+static int
 _setup_mpi(slurmd_job_t *job, int ltaskid)
 {
 	mpi_plugin_task_info_t info[1];
@@ -282,7 +282,7 @@ _setup_mpi(slurmd_job_t *job, int ltaskid)
 	info->self = job->envtp->self;
 	info->client = job->envtp->cli;
 		
-	mpi_hook_slurmstepd_task(info, &job->env);
+	return mpi_hook_slurmstepd_task(info, &job->env);
 }
 
 
@@ -374,8 +374,12 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 			exit(1);
 		}
 
-		_setup_mpi(job, i);
-	
+		if (_setup_mpi(job, i) != SLURM_SUCCESS) {
+			error("Unable to configure MPI plugin: %m");
+			log_fini();
+			exit(1);
+		}
+
 		pdebug_stop_current(job);
 	}
 
diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c
index a3065e982a1ee0c46fbb3eee92c2cc7c07de28d8..6f63b1ece476b1f39659eb251dc80dd99efe3cf1 100644
--- a/src/slurmdbd/proc_req.c
+++ b/src/slurmdbd/proc_req.c
@@ -63,6 +63,7 @@ static int   _get_accounts(void *db_conn, Buf in_buffer, Buf *out_buffer);
 static int   _get_assocs(void *db_conn, Buf in_buffer, Buf *out_buffer);
 static int   _get_clusters(void *db_conn, Buf in_buffer, Buf *out_buffer);
 static int   _get_jobs(void *db_conn, Buf in_buffer, Buf *out_buffer);
+static int   _get_jobs_cond(void *db_conn, Buf in_buffer, Buf *out_buffer);
 static int   _get_usage(uint16_t type, void *db_conn,
 			Buf in_buffer, Buf *out_buffer);
 static int   _get_users(void *db_conn, Buf in_buffer, Buf *out_buffer);
@@ -176,6 +177,9 @@ proc_req(void **db_conn, slurm_fd orig_fd,
 		case DBD_GET_JOBS:
 			rc = _get_jobs(*db_conn, in_buffer, out_buffer);
 			break;
+		case DBD_GET_JOBS_COND:
+			rc = _get_jobs_cond(*db_conn, in_buffer, out_buffer);
+			break;
 		case DBD_GET_USERS:
 			rc = _get_users(*db_conn, in_buffer, out_buffer);
 			break;
@@ -300,7 +304,7 @@ static int _add_accounts(void *db_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(!assoc_mgr_fill_in_user(db_conn, &user, 1)) {
+		if(assoc_mgr_fill_in_user(db_conn, &user, 1) != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
 			rc = SLURM_ERROR;
@@ -352,12 +356,15 @@ static int _add_account_coords(void *db_conn,
 	if(*uid != slurmdbd_conf->slurm_user_id
 	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
 		ListIterator itr = NULL;
+		ListIterator itr2 = NULL;
 		acct_user_rec_t user;
 		acct_coord_rec_t *coord = NULL;
-		
+		char *acct = NULL;
+		int bad = 0;
+
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(!assoc_mgr_fill_in_user(db_conn, &user, 1)) {
+		if(assoc_mgr_fill_in_user(db_conn, &user, 1) != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
 			rc = SLURM_ERROR;
@@ -369,14 +376,23 @@ static int _add_account_coords(void *db_conn,
 			rc = ESLURM_ACCESS_DENIED;
 			goto end_it;
 		}
-		itr = list_iterator_create(user.coord_accts);
-		while((coord = list_next(itr))) {
-			if(!strcasecmp(coord->acct_name, get_msg->acct))
+		itr = list_iterator_create(get_msg->acct_list);
+		itr2 = list_iterator_create(user.coord_accts);
+		while((acct = list_next(itr))) {
+			while((coord = list_next(itr2))) {
+				if(!strcasecmp(coord->acct_name, acct))
+					break;
+			}
+			if(!coord)  {
+				bad = 1;
 				break;
+			}
+			list_iterator_reset(itr2);
 		}
+		list_iterator_destroy(itr2);
 		list_iterator_destroy(itr);
 		
-		if(!coord)  {
+		if(bad)  {
 			comment = "Your user doesn't have privilege to preform this action";
 			error("%s", comment);
 			rc = ESLURM_ACCESS_DENIED;
@@ -384,7 +400,7 @@ static int _add_account_coords(void *db_conn,
 		}
 	}
 
-	rc = acct_storage_g_add_coord(db_conn, *uid, get_msg->acct,
+	rc = acct_storage_g_add_coord(db_conn, *uid, get_msg->acct_list,
 				      get_msg->cond);
 end_it:
 	slurmdbd_free_acct_coord_msg(get_msg);
@@ -419,7 +435,7 @@ static int _add_assocs(void *db_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(!assoc_mgr_fill_in_user(db_conn, &user, 1)) {
+		if(assoc_mgr_fill_in_user(db_conn, &user, 1) != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
 			rc = SLURM_ERROR;
@@ -510,7 +526,7 @@ static int _add_users(void *db_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(!assoc_mgr_fill_in_user(db_conn, &user, 1)) {
+		if(assoc_mgr_fill_in_user(db_conn, &user, 1) != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
 			rc = SLURM_ERROR;
@@ -688,7 +704,13 @@ static int _get_jobs(void *db_conn, Buf in_buffer, Buf *out_buffer)
 	
 	memset(&sacct_params, 0, sizeof(sacct_parameters_t));
 	sacct_params.opt_cluster = get_jobs_msg->cluster_name;
-
+	sacct_params.opt_uid = -1;
+	if(get_jobs_msg->user) {
+		struct passwd *pw = NULL;
+		if ((pw=getpwnam(get_jobs_msg->user)))
+			sacct_params.opt_uid = pw->pw_uid;
+	}
+		
 	list_msg.my_list = jobacct_storage_g_get_jobs(
 		db_conn,
 		get_jobs_msg->selected_steps, get_jobs_msg->selected_parts,
@@ -705,6 +727,35 @@ static int _get_jobs(void *db_conn, Buf in_buffer, Buf *out_buffer)
 	return SLURM_SUCCESS;
 }
 
+static int _get_jobs_cond(void *db_conn, Buf in_buffer, Buf *out_buffer)
+{
+	dbd_cond_msg_t *cond_msg = NULL;
+	dbd_list_msg_t list_msg;
+	char *comment = NULL;
+
+	debug2("DBD_GET_JOBS_COND: called");
+	if (slurmdbd_unpack_cond_msg(DBD_GET_JOBS_COND, &cond_msg, in_buffer) !=
+	    SLURM_SUCCESS) {
+		comment = "Failed to unpack DBD_GET_JOBS_COND message";
+		error("%s", comment);
+		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, 
+					      DBD_GET_JOBS_COND);
+		return SLURM_ERROR;
+	}
+	
+	list_msg.my_list = jobacct_storage_g_get_jobs_cond(
+		db_conn, cond_msg->cond);
+	slurmdbd_free_cond_msg(DBD_GET_JOBS_COND, cond_msg);
+
+	*out_buffer = init_buf(1024);
+	pack16((uint16_t) DBD_GOT_JOBS, *out_buffer);
+	slurmdbd_pack_list_msg(DBD_GOT_JOBS, &list_msg, *out_buffer);
+	if(list_msg.my_list)
+		list_destroy(list_msg.my_list);
+	
+	return SLURM_SUCCESS;
+}
+
 static int _get_usage(uint16_t type, void *db_conn,
 		      Buf in_buffer, Buf *out_buffer)
 {
@@ -1077,8 +1128,26 @@ static int   _modify_accounts(void *db_conn,
 	}
 	
 
-	list_msg.my_list = acct_storage_g_modify_accounts(
-		db_conn, *uid, get_msg->cond, get_msg->rec);
+	if(!(list_msg.my_list = acct_storage_g_modify_accounts(
+		     db_conn, *uid, get_msg->cond, get_msg->rec))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_modify_msg(DBD_MODIFY_ACCOUNTS, get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_ACCOUNTS);
+		return rc;		
+	}
 	slurmdbd_free_modify_msg(DBD_MODIFY_ACCOUNTS, get_msg);
 
 	*out_buffer = init_buf(1024);
@@ -1100,16 +1169,6 @@ static int   _modify_assocs(void *db_conn,
 
 	debug2("DBD_MODIFY_ASSOCS: called");
 
-	if(*uid != slurmdbd_conf->slurm_user_id
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
-		comment = "Your user doesn't have privilege to preform this action";
-		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
-					      comment, DBD_MODIFY_ASSOCS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_modify_msg(DBD_MODIFY_ASSOCS, &get_msg, 
 				       in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_MODIFY_ASSOCS message";
@@ -1120,8 +1179,31 @@ static int   _modify_assocs(void *db_conn,
 	}
 	
 
-	list_msg.my_list = acct_storage_g_modify_associations(db_conn, *uid,
-						get_msg->cond, get_msg->rec);
+	/* All authentication needs to be done inside the plugin since we
+	 * cannot know which accounts this request refers to until it has
+	 * been processed through the database.
+	 */
+
+	if(!(list_msg.my_list = acct_storage_g_modify_associations(
+		     db_conn, *uid, get_msg->cond, get_msg->rec))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_modify_msg(DBD_MODIFY_ASSOCS, get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_ASSOCS);
+		return rc;
+	}
 
 	slurmdbd_free_modify_msg(DBD_MODIFY_ASSOCS, get_msg);
 	*out_buffer = init_buf(1024);
@@ -1163,8 +1245,26 @@ static int   _modify_clusters(void *db_conn,
 	
 	debug2("DBD_MODIFY_CLUSTERS: called");
 
-	list_msg.my_list = acct_storage_g_modify_clusters(db_conn, *uid,
-					    get_msg->cond, get_msg->rec);
+	if(!(list_msg.my_list = acct_storage_g_modify_clusters(
+		     db_conn, *uid, get_msg->cond, get_msg->rec))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_modify_msg(DBD_MODIFY_CLUSTERS, get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_CLUSTERS);
+		return rc;
+	}
 
 	slurmdbd_free_modify_msg(DBD_MODIFY_CLUSTERS, get_msg);
 	*out_buffer = init_buf(1024);
@@ -1214,11 +1314,29 @@ static int   _modify_users(void *db_conn,
 			ACCT_ADMIN_NOTSET;
 	}
 
-	list_msg.my_list = acct_storage_g_modify_users(
-		db_conn, *uid, get_msg->cond, get_msg->rec);
+	if(!(list_msg.my_list = acct_storage_g_modify_users(
+		     db_conn, *uid, get_msg->cond, get_msg->rec))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_modify_msg(DBD_MODIFY_USERS, get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_USERS);
+		return rc;
+	}
 
 	slurmdbd_free_modify_msg(DBD_MODIFY_USERS, get_msg);
-		*out_buffer = init_buf(1024);
+	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
@@ -1404,18 +1522,29 @@ static int   _remove_accounts(void *db_conn,
 		return SLURM_ERROR;
 	}
 	
-	list_msg.my_list = acct_storage_g_remove_accounts(
-		db_conn, *uid, get_msg->cond);
-/* this should be done inside the plugin */
-/* 	if(rc == SLURM_SUCCESS) { */
-/* 		memset(&assoc_q, 0, sizeof(acct_association_cond_t)); */
-/* 		assoc_q.acct_list = */
-/* 			((acct_account_cond_t *)get_msg->cond)->acct_list; */
-/* 		list_msg.my_list = acct_storage_g_remove_associations(db_conn, *uid, &assoc_q); */
-/* 	} */
+	if(!(list_msg.my_list = acct_storage_g_remove_accounts(
+		     db_conn, *uid, get_msg->cond))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_cond_msg(DBD_REMOVE_ACCOUNTS, get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_ACCOUNTS);
+		return rc;
+	}
 
 	slurmdbd_free_cond_msg(DBD_REMOVE_ACCOUNTS, get_msg);
-		*out_buffer = init_buf(1024);
+	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
@@ -1435,37 +1564,54 @@ static int   _remove_account_coords(void *db_conn,
 
 	debug2("DBD_REMOVE_ACCOUNT_COORDS: called");
 
-	if(*uid != slurmdbd_conf->slurm_user_id
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
-		comment = "Your user doesn't have privilege to preform this action";
-		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(
-			ESLURM_ACCESS_DENIED, comment,
-			DBD_REMOVE_ACCOUNT_COORDS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_acct_coord_msg(&get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REMOVE_ACCOUNT_COORDS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(
-			SLURM_ERROR, comment, DBD_REMOVE_ACCOUNT_COORDS);
-		return SLURM_ERROR;
+		rc = SLURM_ERROR;
+		goto end_it;
 	}
 	
-	list_msg.my_list = acct_storage_g_remove_coord(
-		db_conn, *uid, get_msg->acct, get_msg->cond);
+	/* All authentication needs to be done inside the plugin since we
+	 * cannot know which accounts this request refers to until it has
+	 * been processed through the database.
+	 */
+
+	if(!(list_msg.my_list = acct_storage_g_remove_coord(
+		     db_conn, *uid, get_msg->acct_list, get_msg->cond))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_acct_coord_msg(get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, 
+					      DBD_REMOVE_ACCOUNT_COORDS);
+		return rc;
+	}
 
 	slurmdbd_free_acct_coord_msg(get_msg);
-		*out_buffer = init_buf(1024);
+	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
+end_it:
+	slurmdbd_free_acct_coord_msg(get_msg);
+	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_ACCOUNT_COORDS);
+	return rc;
+
 }
 
 static int   _remove_assocs(void *db_conn,
@@ -1477,17 +1623,6 @@ static int   _remove_assocs(void *db_conn,
 	char *comment = NULL;
 
 	debug2("DBD_REMOVE_ASSOCS: called");
-
-	if(*uid != slurmdbd_conf->slurm_user_id
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
-		comment = "Your user doesn't have privilege to preform this action";
-		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
-					      comment, DBD_REMOVE_ASSOCS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_cond_msg(DBD_REMOVE_ASSOCS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REMOVE_ASSOCS message";
@@ -1496,12 +1631,35 @@ static int   _remove_assocs(void *db_conn,
 					      comment, DBD_REMOVE_ASSOCS);
 		return SLURM_ERROR;
 	}
-	
-	list_msg.my_list = acct_storage_g_remove_associations(
-		db_conn, *uid, get_msg->cond);
 
+	/* All authentication needs to be done inside the plugin since we
+	 * cannot know which accounts this request refers to until it has
+	 * been processed through the database.
+	 */
+
+	if(!(list_msg.my_list = acct_storage_g_remove_associations(
+		     db_conn, *uid, get_msg->cond))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_cond_msg(DBD_REMOVE_ASSOCS, get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_ASSOCS);
+		return rc;
+	}
+	
 	slurmdbd_free_cond_msg(DBD_REMOVE_ASSOCS, get_msg);
-		*out_buffer = init_buf(1024);
+	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
@@ -1541,18 +1699,29 @@ static int   _remove_clusters(void *db_conn,
 		return SLURM_ERROR;
 	}
 	
-	list_msg.my_list = acct_storage_g_remove_clusters(
-		db_conn, *uid, get_msg->cond);
-/* this should be done inside the plugin */
-/* 	if(rc == SLURM_SUCCESS) { */
-/* 		memset(&assoc_q, 0, sizeof(acct_association_cond_t)); */
-/* 		assoc_q.cluster_list = */
-/* 			((acct_cluster_cond_t *)get_msg->cond)->cluster_list; */
-/* 		list_msg.my_list = acct_storage_g_remove_associations(db_conn, *uid, &assoc_q); */
-/* 	} */
+	if(!(list_msg.my_list = acct_storage_g_remove_clusters(
+		     db_conn, *uid, get_msg->cond))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_cond_msg(DBD_REMOVE_CLUSTERS, get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_CLUSTERS);
+		return rc;		
+	}
 
 	slurmdbd_free_cond_msg(DBD_REMOVE_CLUSTERS, get_msg);
-		*out_buffer = init_buf(1024);
+	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
@@ -1590,18 +1759,29 @@ static int   _remove_users(void *db_conn,
 		return SLURM_ERROR;
 	}
 	
-	list_msg.my_list = acct_storage_g_remove_users(
-		db_conn, *uid, get_msg->cond);
-/* this should be done inside the plugin */
-	/* if(rc == SLURM_SUCCESS) { */
-/* 		memset(&assoc_q, 0, sizeof(acct_association_cond_t)); */
-/* 		assoc_q.user_list = */
-/* 			((acct_user_cond_t *)get_msg->cond)->user_list; */
-/* 		list_msg.my_list = acct_storage_g_remove_associations(db_conn, *uid, &assoc_q); */
-/* 	} */
+	if(!(list_msg.my_list = acct_storage_g_remove_users(
+		     db_conn, *uid, get_msg->cond))) {
+		if(errno == ESLURM_ACCESS_DENIED) {
+			comment = "Your user doesn't have privilege to preform this action";
+			rc = ESLURM_ACCESS_DENIED;
+		} else if(errno == SLURM_ERROR) {
+			comment = "Something was wrong with your query";
+			rc = SLURM_ERROR;
+		} else if(errno == SLURM_NO_CHANGE_IN_DATA) {
+			comment = "Request didn't affect anything";
+			rc = SLURM_SUCCESS;
+		} else {
+			comment = "Unkown issue";
+			rc = SLURM_ERROR;
+		}
+		error("%s", comment);
+		slurmdbd_free_cond_msg(DBD_REMOVE_USERS, get_msg);
+		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_USERS);
+		return rc;
+	}
 
 	slurmdbd_free_cond_msg(DBD_REMOVE_USERS, get_msg);
-		*out_buffer = init_buf(1024);
+	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
@@ -1678,6 +1858,7 @@ static int  _step_complete(void *db_conn,
 	job.assoc_id = step_comp_msg->assoc_id;
 	job.db_index = step_comp_msg->db_index;
 	job.end_time = step_comp_msg->end_time;
+	step.exit_code = step_comp_msg->exit_code;
 	step.jobacct = step_comp_msg->jobacct;
 	job.job_id = step_comp_msg->job_id;
 	job.requid = step_comp_msg->req_uid;
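Every _modify_* and _remove_* handler above repeats the same errno-to-reply translation. As a sketch of a possible consolidation (not part of this patch), the mapping could live in one helper; _dbd_errno_to_rc() is a hypothetical name, and the error codes and messages are the ones proc_req.c already uses:

static int _dbd_errno_to_rc(int err, char **comment)
{
	switch (err) {
	case ESLURM_ACCESS_DENIED:
		*comment = "Your user doesn't have privilege to perform this action";
		return ESLURM_ACCESS_DENIED;
	case SLURM_NO_CHANGE_IN_DATA:
		*comment = "Request didn't affect anything";
		return SLURM_SUCCESS;	/* nothing matched; not an error */
	case SLURM_ERROR:
		*comment = "Something was wrong with your query";
		return SLURM_ERROR;
	default:
		*comment = "Unknown issue";
		return SLURM_ERROR;
	}
}

Each handler would then reduce to rc = _dbd_errno_to_rc(errno, &comment); error("%s", comment); followed by the usual make_dbd_rc_msg() reply.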
diff --git a/src/slurmdbd/read_config.c b/src/slurmdbd/read_config.c
index 73bf525190b38374d8e854aee72e8c7512a86747..e14489454bb3d2e75d923482d4be2016152f6d95 100644
--- a/src/slurmdbd/read_config.c
+++ b/src/slurmdbd/read_config.c
@@ -75,15 +75,19 @@ extern void free_slurmdbd_conf(void)
 static void _clear_slurmdbd_conf(void)
 {
 	if (slurmdbd_conf) {
+		slurmdbd_conf->archive_age = 0;
+		xfree(slurmdbd_conf->archive_script);
 		xfree(slurmdbd_conf->auth_info);
 		xfree(slurmdbd_conf->auth_type);
 		xfree(slurmdbd_conf->dbd_addr);
 		xfree(slurmdbd_conf->dbd_host);
 		slurmdbd_conf->dbd_port = 0;
+		slurmdbd_conf->job_purge = 0;
 		xfree(slurmdbd_conf->log_file);
 		xfree(slurmdbd_conf->pid_file);
 		xfree(slurmdbd_conf->plugindir);
 		xfree(slurmdbd_conf->slurm_user_name);
+		slurmdbd_conf->step_purge = 0;
 		xfree(slurmdbd_conf->storage_host);
 		xfree(slurmdbd_conf->storage_loc);
 		xfree(slurmdbd_conf->storage_pass);
@@ -102,17 +106,21 @@ static void _clear_slurmdbd_conf(void)
 extern int read_slurmdbd_conf(void)
 {
 	s_p_options_t options[] = {
+		{"ArchiveAge", S_P_UINT16},
+		{"ArchiveScript", S_P_STRING},
 		{"AuthInfo", S_P_STRING},
 		{"AuthType", S_P_STRING},
 		{"DbdAddr", S_P_STRING},
 		{"DbdHost", S_P_STRING},
 		{"DbdPort", S_P_UINT16},
 		{"DebugLevel", S_P_UINT16},
+		{"JobPurge", S_P_UINT16},
 		{"LogFile", S_P_STRING},
 		{"MessageTimeout", S_P_UINT16},
 		{"PidFile", S_P_STRING},
 		{"PluginDir", S_P_STRING},
 		{"SlurmUser", S_P_STRING},
+		{"StepPurge", S_P_UINT16},
 		{"StorageHost", S_P_STRING},
 		{"StorageLoc", S_P_STRING},
 		{"StoragePass", S_P_STRING},
@@ -144,12 +152,16 @@ extern int read_slurmdbd_conf(void)
 		 	     conf_path);
 		}
 
+		s_p_get_uint16(&slurmdbd_conf->archive_age, "ArchiveAge", tbl);
+		s_p_get_string(&slurmdbd_conf->archive_script, "ArchiveScript", tbl);
 		s_p_get_string(&slurmdbd_conf->auth_info, "AuthInfo", tbl);
 		s_p_get_string(&slurmdbd_conf->auth_type, "AuthType", tbl);
 		s_p_get_string(&slurmdbd_conf->dbd_host, "DbdHost", tbl);
 		s_p_get_string(&slurmdbd_conf->dbd_addr, "DbdAddr", tbl);
 		s_p_get_uint16(&slurmdbd_conf->dbd_port, "DbdPort", tbl);
 		s_p_get_uint16(&slurmdbd_conf->debug_level, "DebugLevel", tbl);
+		if (!s_p_get_uint16(&slurmdbd_conf->job_purge, "JobPurge", tbl))
+			slurmdbd_conf->job_purge = DEFAULT_SLURMDBD_JOB_PURGE;
 		s_p_get_string(&slurmdbd_conf->log_file, "LogFile", tbl);
 		if (!s_p_get_uint16(&slurmdbd_conf->msg_timeout,
 				    "MessageTimeout", tbl))
@@ -162,6 +174,8 @@ extern int read_slurmdbd_conf(void)
 		s_p_get_string(&slurmdbd_conf->plugindir, "PluginDir", tbl);
 		s_p_get_string(&slurmdbd_conf->slurm_user_name, "SlurmUser",
 			       tbl);
+		if (!s_p_get_uint16(&slurmdbd_conf->step_purge, "StepPurge", tbl))
+			slurmdbd_conf->step_purge = DEFAULT_SLURMDBD_STEP_PURGE;
 		s_p_get_string(&slurmdbd_conf->storage_host,
 				"StorageHost", tbl);
 		s_p_get_string(&slurmdbd_conf->storage_loc,
@@ -215,18 +229,26 @@ extern int read_slurmdbd_conf(void)
 /* Log the current configuration using verbose() */
 extern void log_config(void)
 {
+	if (slurmdbd_conf->archive_age) {
+		debug2("ArchiveAge        = %u days", 
+		       slurmdbd_conf->archive_age);
+	} else
+		debug2("ArchiveAge        = NONE");
+	debug2("ArchiveScript     = %s", slurmdbd_conf->archive_script);
 	debug2("AuthInfo          = %s", slurmdbd_conf->auth_info);
 	debug2("AuthType          = %s", slurmdbd_conf->auth_type);
 	debug2("DbdAddr           = %s", slurmdbd_conf->dbd_addr);
 	debug2("DbdHost           = %s", slurmdbd_conf->dbd_host);
 	debug2("DbdPort           = %u", slurmdbd_conf->dbd_port);
 	debug2("DebugLevel        = %u", slurmdbd_conf->debug_level);
+	debug2("JobPurge          = %u days", slurmdbd_conf->job_purge);
 	debug2("LogFile           = %s", slurmdbd_conf->log_file);
 	debug2("MessageTimeout    = %u", slurmdbd_conf->msg_timeout);
 	debug2("PidFile           = %s", slurmdbd_conf->pid_file);
 	debug2("PluginDir         = %s", slurmdbd_conf->plugindir);
 	debug2("SlurmUser         = %s(%u)", 
-		slurmdbd_conf->slurm_user_name, slurmdbd_conf->slurm_user_id); 
+		slurmdbd_conf->slurm_user_name, slurmdbd_conf->slurm_user_id);
+	debug2("StepPurge         = %u days", slurmdbd_conf->step_purge); 
 	debug2("StorageHost       = %s", slurmdbd_conf->storage_host);
 	debug2("StorageLoc        = %s", slurmdbd_conf->storage_loc);
 	debug2("StoragePass       = %s", slurmdbd_conf->storage_pass);
diff --git a/src/slurmdbd/read_config.h b/src/slurmdbd/read_config.h
index 79d09c5141db385e4e57ea806ee13f6c439cef75..8c5895252593af2bafb64622a09fdfe366db54fb 100644
--- a/src/slurmdbd/read_config.h
+++ b/src/slurmdbd/read_config.h
@@ -55,25 +55,31 @@
 #include <time.h>
 
 #define DEFAULT_SLURMDBD_AUTHTYPE	"auth/none"
+#define DEFAULT_SLURMDBD_JOB_PURGE	360
 #define DEFAULT_SLURMDBD_PIDFILE	"/var/run/slurmdbd.pid"
+#define DEFAULT_SLURMDBD_STEP_PURGE	30
 
 /* SlurmDBD configuration parameters */
 typedef struct slurm_dbd_conf {
 	time_t		last_update;	/* time slurmdbd.conf read	*/
+	uint16_t	archive_age;	/* archive data this age	*/
+	char *		archive_script;	/* script to archive old data	*/
 	char *		auth_info;	/* authentication info		*/
 	char *		auth_type;	/* authentication mechanism	*/
 	char *		dbd_addr;	/* network address of Slurm DBD	*/
 	char *		dbd_host;	/* hostname of Slurm DBD	*/
 	uint16_t	dbd_port;	/* port number for RPCs to DBD	*/
 	uint16_t	debug_level;	/* Debug level, default=3	*/
+	uint16_t	job_purge;	/* purge time for job info	*/ 
 	char *		log_file;	/* Log file			*/
-	uint16_t        msg_timeout;    /* message timeout */   
+	uint16_t        msg_timeout;    /* message timeout		*/   
 	char *		pid_file;	/* where to store current PID	*/
 	char *		plugindir;	/* dir to look for plugins	*/
 	uint32_t	slurm_user_id;	/* uid of slurm_user_name	*/
 	char *		slurm_user_name;/* user that slurmcdtld runs as	*/
+	uint16_t	step_purge;	/* purge time for step info	*/
 	char *		storage_host;	/* host where DB is running	*/
-	char *		storage_loc;	/* database name */
+	char *		storage_loc;	/* database name		*/
 	char *		storage_pass;   /* password for DB write	*/
 	uint16_t	storage_port;	/* port DB is listening to	*/
 	char *		storage_type;	/* DB to be used for storage	*/
diff --git a/src/slurmdbd/slurmdbd.c b/src/slurmdbd/slurmdbd.c
index 6738308ef6a7a451d26c202f3c95b5caf12446c2..ae2a361ca398624ba3ea01ad819b06c07f733376 100644
--- a/src/slurmdbd/slurmdbd.c
+++ b/src/slurmdbd/slurmdbd.c
@@ -139,7 +139,7 @@ int main(int argc, char *argv[])
 
 	db_conn = acct_storage_g_get_connection(false, false);
 	
-	if(assoc_mgr_init(db_conn, 0) == SLURM_ERROR) {
+	if(assoc_mgr_init(db_conn, NULL) == SLURM_ERROR) {
 		error("Problem getting cache of data");
 		acct_storage_g_close_connection(&db_conn);
 		goto end_it;
diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c
index 0f15f36287f2571cd2559eef18eb08fa444917e8..45ee0ffa0b6d0ef06f97d55234c97cc3e68e35f9 100644
--- a/src/smap/configure_functions.c
+++ b/src/smap/configure_functions.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  configure_functions.c - Functions related to configure mode of smap.
- *  $Id: configure_functions.c 13783 2008-04-03 00:07:07Z da $
+ *  $Id: configure_functions.c 14295 2008-06-19 23:58:28Z da $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -986,6 +986,8 @@ static int _add_bg_record(blockreq_t *blockreq, List allocated_blocks)
 #ifdef HAVE_BG
 	char *nodes = NULL, *conn_type = NULL;
 	int bp_count = 0;
+	int diff=0;
+	int largest_diff=-1;
 	int start[BA_SYSTEM_DIMENSIONS];
 	int end[BA_SYSTEM_DIMENSIONS];
 	int start1[BA_SYSTEM_DIMENSIONS];
@@ -996,10 +998,6 @@ static int _add_bg_record(blockreq_t *blockreq, List allocated_blocks)
 	int len = 0;
 	int x,y,z;
 	
-	start1[X] = 0;
-	start1[Y] = 0;
-	start1[Z] = 0;
-	
 	geo[X] = 0;
 	geo[Y] = 0;
 	geo[Z] = 0;
@@ -1052,7 +1050,8 @@ static int _add_bg_record(blockreq_t *blockreq, List allocated_blocks)
 			end[Z] = (number % HOSTLIST_BASE);
 
 			j += 3;
-			if(!bp_count) {
+			diff = end[X]-start[X];
+			if(diff > largest_diff) {
 				start1[X] = start[X];
 				start1[Y] = start[Y];
 				start1[Z] = start[Z];
@@ -1087,7 +1086,8 @@ static int _add_bg_record(blockreq_t *blockreq, List allocated_blocks)
 			start[Z] = (number % HOSTLIST_BASE);
 			
 			j+=3;
-			if(!bp_count) {
+			diff = 0;
+			if(diff > largest_diff) {
 				start1[X] = start[X];
 				start1[Y] = start[Y];
 				start1[Z] = start[Z];
@@ -1117,7 +1117,13 @@ static int _add_bg_record(blockreq_t *blockreq, List allocated_blocks)
 		geo[X], geo[Y], geo[Z], conn_type, 
 		start1[X], start1[Y], start1[Z],
 		blockreq->nodecards, blockreq->quarters);
+	if(!strcasecmp(layout_mode, "OVERLAP")) 
+		reset_ba_system(false);
+	
+	set_all_bps_except(nodes);
 	_create_allocation(com, allocated_blocks);
+	reset_all_removed_bps();
+	
 #endif
 	return SLURM_SUCCESS;
 }
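The _add_bg_record() change above stops taking the first midplane range's start coordinate and instead remembers the start of the widest X-range seen so far (largest_diff starts at -1, so the first range always qualifies). Roughly the same bookkeeping in isolation, with illustrative names only:

typedef struct { int start_x, end_x; } range_t;

/* Return the X start coordinate of the widest range in the list. */
static int widest_range_start(const range_t *ranges, int nranges)
{
	int i, diff, largest_diff = -1, start_x = 0;

	for (i = 0; i < nranges; i++) {
		diff = ranges[i].end_x - ranges[i].start_x;
		if (diff > largest_diff) {
			largest_diff = diff;
			start_x = ranges[i].start_x;
		}
	}
	return start_x;
}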
diff --git a/src/squeue/print.c b/src/squeue/print.c
index 0225f2ca31608812a36f2d6ee9b416c0cfd829ca..0985f5ed2d17d249bce68e559d6e7f1d082d771d 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -488,6 +488,8 @@ int _print_job_time_start(job_info_t * job, int width, bool right,
 {
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("START", width, right, true);
+	else if (job->job_state == JOB_PENDING)
+		_print_time((time_t) 0, 0, width, right);
 	else
 		_print_time(job->start_time, 0, width, right);
 	if (suffix)
diff --git a/src/squeue/squeue.c b/src/squeue/squeue.c
index 7a0a5f3a72b8699284bae99d9822a502d639cc14..12ab9db3b4c2c3a855e0dff8c94e1a121ccab519 100644
--- a/src/squeue/squeue.c
+++ b/src/squeue/squeue.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  squeue.c - Report jobs in the slurm system
  *
- *  $Id: squeue.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: squeue.c 14165 2008-05-30 21:23:22Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -72,8 +72,12 @@ main (int argc, char *argv[])
 {
 	log_options_t opts = LOG_OPTS_STDERR_ONLY ;
 
-	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_DAEMON, NULL);
+	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_USER, NULL);
 	parse_command_line( argc, argv );
+	if (params.verbose) {
+		opts.stderr_level += params.verbose;
+		log_alter(opts, SYSLOG_FACILITY_USER, NULL);
+	}
 	max_line_size = _get_window_width( );
 	
 	while (1) 
diff --git a/src/sreport/Makefile.am b/src/sreport/Makefile.am
index a4bf7fccb98431dd184120e564b1488a31b00ba5..c01f23057d469da3d3532e4d46e4e15025471fa8 100644
--- a/src/sreport/Makefile.am
+++ b/src/sreport/Makefile.am
@@ -7,7 +7,12 @@ INCLUDES = -I$(top_srcdir)
 bin_PROGRAMS = sreport
 
 sreport_SOURCES =	\
-	sreport.c sreport.h	
+	sreport.c sreport.h \
+	cluster_reports.c cluster_reports.h \
+	assoc_reports.c assoc_reports.h	\
+	job_reports.c job_reports.h	\
+	user_reports.c user_reports.h	\
+	common.c
 
 sreport_LDADD =  \
 	$(top_builddir)/src/common/libcommon.o -ldl \
diff --git a/src/sreport/Makefile.in b/src/sreport/Makefile.in
index e15b13e502bae9d78558c4073f0395a214b7f964..b32e054f34d07dc963154e8319cd0b2458feee35 100644
--- a/src/sreport/Makefile.in
+++ b/src/sreport/Makefile.in
@@ -70,7 +70,9 @@ CONFIG_CLEAN_FILES =
 am__installdirs = "$(DESTDIR)$(bindir)"
 binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
-am_sreport_OBJECTS = sreport.$(OBJEXT)
+am_sreport_OBJECTS = sreport.$(OBJEXT) cluster_reports.$(OBJEXT) \
+	assoc_reports.$(OBJEXT) job_reports.$(OBJEXT) \
+	user_reports.$(OBJEXT) common.$(OBJEXT)
 sreport_OBJECTS = $(am_sreport_OBJECTS)
 am__DEPENDENCIES_1 =
 sreport_DEPENDENCIES = $(top_builddir)/src/common/libcommon.o \
@@ -266,7 +268,12 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir)
 sreport_SOURCES = \
-	sreport.c sreport.h	
+	sreport.c sreport.h \
+	cluster_reports.c cluster_reports.h \
+	assoc_reports.c assoc_reports.h	\
+	job_reports.c job_reports.h	\
+	user_reports.c user_reports.h	\
+	common.c
 
 sreport_LDADD = \
 	$(top_builddir)/src/common/libcommon.o -ldl \
@@ -345,7 +352,12 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/assoc_reports.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cluster_reports.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_reports.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sreport.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/user_reports.Po@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
diff --git a/src/sreport/assoc_reports.c b/src/sreport/assoc_reports.c
new file mode 100644
index 0000000000000000000000000000000000000000..40d718f1a97c475b93040897c4b8b9e95cf8767a
--- /dev/null
+++ b/src/sreport/assoc_reports.c
@@ -0,0 +1,40 @@
+/*****************************************************************************\
+ *  assoc_reports.c - functions for generating association reports
+ *                     from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "assoc_reports.h"
diff --git a/src/sreport/assoc_reports.h b/src/sreport/assoc_reports.h
new file mode 100644
index 0000000000000000000000000000000000000000..702d96e9eb151e65b23df623007604e555fa88f6
--- /dev/null
+++ b/src/sreport/assoc_reports.h
@@ -0,0 +1,46 @@
+/*****************************************************************************\
+ *  assoc_reports.h - functions for generating association reports
+ *                     from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SREPORT_ASSOC_REPORTS_H
+#define __SREPORT_ASSOC_REPORTS_H
+
+#include "sreport.h"
+
+
+#endif
diff --git a/src/sreport/cluster_reports.c b/src/sreport/cluster_reports.c
new file mode 100644
index 0000000000000000000000000000000000000000..85759e62f3b8243b7b8e3e03e9d1f4074ca0b81a
--- /dev/null
+++ b/src/sreport/cluster_reports.c
@@ -0,0 +1,383 @@
+/*****************************************************************************\
+ *  cluster_reports.c - functions for generating cluster reports
+ *                       from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "cluster_reports.h"
+
+enum {
+	PRINT_CLUSTER_NAME,
+	PRINT_CLUSTER_CPUS,
+	PRINT_CLUSTER_ACPU,
+	PRINT_CLUSTER_DCPU,
+	PRINT_CLUSTER_ICPU,
+	PRINT_CLUSTER_OCPU,
+	PRINT_CLUSTER_RCPU,
+	PRINT_CLUSTER_TOTAL
+};
+
+typedef enum {
+	GROUP_BY_ACCOUNT,
+	GROUP_BY_ACCOUNT_JOB_SIZE,
+	GROUP_BY_ACCOUNT_JOB_SIZE_DURATION,
+	GROUP_BY_USER,
+	GROUP_BY_USER_JOB_SIZE,
+	GROUP_BY_USER_JOB_SIZE_DURATION,
+	GROUP_BY_NONE
+} report_grouping_t;
+
+static List print_fields_list = NULL; /* types are of print_field_t */
+
+static int _set_cond(int *start, int argc, char *argv[],
+		     acct_cluster_cond_t *cluster_cond,
+		     List format_list)
+{
+	int i;
+	int set = 0;
+	int end = 0;
+	int local_cluster_flag = all_clusters_flag;
+
+	for (i=(*start); i<argc; i++) {
+		end = parse_option_end(argv[i]);
+		if (strncasecmp (argv[i], "Set", 3) == 0) {
+			i--;
+			break;
+		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
+			continue;
+		} else if(!end && !strncasecmp(argv[i], "all_clusters", 1)) {
+			local_cluster_flag = 1;
+			continue;
+		} else if(!end) {
+			addto_char_list(cluster_cond->cluster_list, argv[i]);
+			set = 1;
+		} else if (strncasecmp (argv[i], "End", 1) == 0) {
+			cluster_cond->usage_end = parse_time(argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Format", 1) == 0) {
+			if(format_list)
+				addto_char_list(format_list, argv[i]+end);
+		} else if (strncasecmp (argv[i], "Names", 1) == 0) {
+			addto_char_list(cluster_cond->cluster_list,
+					argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Start", 1) == 0) {
+			cluster_cond->usage_start = parse_time(argv[i]+end);
+			set = 1;
+		} else {
+			printf(" Unknown condition: %s\n"
+			       "Use keyword set to modify value\n", argv[i]);
+		}
+	}
+	(*start) = i;
+
+	if(!local_cluster_flag && !list_count(cluster_cond->cluster_list)) {
+		char *temp = slurm_get_cluster_name();
+		if(temp)
+			list_append(cluster_cond->cluster_list, temp);
+	}
+
+	set_start_end_time((time_t *)&cluster_cond->usage_start,
+			   (time_t *)&cluster_cond->usage_end);
+
+	return set;
+}
+
+static int _setup_print_fields_list(List format_list)
+{
+	ListIterator itr = NULL;
+	print_field_t *field = NULL;
+	char *object = NULL;
+
+	if(!format_list || !list_count(format_list)) {
+		printf(" error: we need a format list to set up the print.\n");
+		return SLURM_ERROR;
+	}
+
+	if(!print_fields_list)
+		print_fields_list = list_create(destroy_print_field);
+
+	itr = list_iterator_create(format_list);
+	while((object = list_next(itr))) {
+		field = xmalloc(sizeof(print_field_t));
+		if(!strncasecmp("Cluster", object, 2)) {
+			field->type = PRINT_CLUSTER_NAME;
+			field->name = xstrdup("Cluster");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("cpu_count", object, 2)) {
+			field->type = PRINT_CLUSTER_CPUS;
+			field->name = xstrdup("CPU count");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("allocated", object, 1)) {
+			field->type = PRINT_CLUSTER_ACPU;
+			field->name = xstrdup("Allocated");
+			if(time_format == SREPORT_TIME_SECS_PER)
+				field->len = 20;
+			else
+				field->len = 12;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("down", object, 1)) {
+			field->type = PRINT_CLUSTER_DCPU;
+			field->name = xstrdup("Down");
+			if(time_format == SREPORT_TIME_SECS_PER)
+				field->len = 18;
+			else
+				field->len = 10;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("idle", object, 1)) {
+			field->type = PRINT_CLUSTER_ICPU;
+			field->name = xstrdup("Idle");
+			if(time_format == SREPORT_TIME_SECS_PER)
+				field->len = 20;
+			else
+				field->len = 12;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("overcommited", object, 1)) {
+			field->type = PRINT_CLUSTER_OCPU;
+			field->name = xstrdup("Over Comm");
+			if(time_format == SREPORT_TIME_SECS_PER)
+				field->len = 18;
+			else
+				field->len = 9;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("reported", object, 3)) {
+			field->type = PRINT_CLUSTER_TOTAL;
+			field->name = xstrdup("Reported");
+			if(time_format == SREPORT_TIME_SECS_PER)
+				field->len = 20;
+			else
+				field->len = 12;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("reserved", object, 3)) {
+			field->type = PRINT_CLUSTER_RCPU;
+			field->name = xstrdup("Reserved");
+			if(time_format == SREPORT_TIME_SECS_PER)
+				field->len = 18;
+			else
+				field->len = 9;
+			field->print_routine = sreport_print_time;
+		} else {
+			printf("Unknown field '%s'\n", object);
+			xfree(field);
+			continue;
+		}
+		list_append(print_fields_list, field);		
+	}
+	list_iterator_destroy(itr);
+
+	return SLURM_SUCCESS;
+}
+
+static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
+			      char *report_name, List format_list)
+{
+	acct_cluster_cond_t *cluster_cond = 
+		xmalloc(sizeof(acct_cluster_cond_t));
+	int i=0;
+	List cluster_list = NULL;
+
+	cluster_cond->cluster_list = list_create(slurm_destroy_char);
+	cluster_cond->with_usage = 1;
+
+	_set_cond(&i, argc, argv, cluster_cond, format_list);
+	
+	cluster_list = acct_storage_g_get_clusters(db_conn, cluster_cond);
+	if(!cluster_list) {
+		printf(" Problem with cluster query.\n");
+		return NULL;
+	}
+
+	if(print_fields_have_header) {
+		char start_char[20];
+		char end_char[20];
+		time_t my_end = cluster_cond->usage_end-1;
+
+		slurm_make_time_str((time_t *)&cluster_cond->usage_start, 
+				    start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end,
+				    end_char, sizeof(end_char));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+		printf("%s %s - %s (%d*cpus secs)\n", 
+		       report_name, start_char, end_char, 
+		       (cluster_cond->usage_end - cluster_cond->usage_start));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+	}
+	(*total_time) = cluster_cond->usage_end - cluster_cond->usage_start;
+
+	destroy_acct_cluster_cond(cluster_cond);
+	
+	return cluster_list;
+}
+
+extern int cluster_utilization(int argc, char *argv[])
+{
+	int rc = SLURM_SUCCESS;
+	ListIterator itr = NULL;
+	ListIterator itr2 = NULL;
+	ListIterator itr3 = NULL;
+	acct_cluster_rec_t *cluster = NULL;
+
+	print_field_t *field = NULL;
+	uint32_t total_time = 0;
+
+	List cluster_list = NULL; 
+
+	List format_list = list_create(slurm_destroy_char);
+
+	print_fields_list = list_create(destroy_print_field);
+
+
+	if(!(cluster_list = _get_cluster_list(argc, argv, &total_time,
+					      "Cluster Utilization",
+					      format_list))) 
+		goto end_it;
+
+	if(!list_count(format_list)) 
+		addto_char_list(format_list, "Cl,a,d,i,res,rep");
+
+	_setup_print_fields_list(format_list);
+	list_destroy(format_list);
+
+	itr = list_iterator_create(cluster_list);
+	itr2 = list_iterator_create(print_fields_list);
+
+	print_fields_header(print_fields_list);
+
+	while((cluster = list_next(itr))) {
+		cluster_accounting_rec_t *accting = NULL;
+		cluster_accounting_rec_t total_acct;
+		uint64_t total_reported = 0;
+		uint64_t local_total_time = 0;
+
+		if(!cluster->accounting_list
+		   || !list_count(cluster->accounting_list))
+			continue;
+
+		memset(&total_acct, 0, sizeof(cluster_accounting_rec_t));
+		
+		itr3 = list_iterator_create(cluster->accounting_list);
+		while((accting = list_next(itr3))) {
+			total_acct.alloc_secs += accting->alloc_secs;
+			total_acct.down_secs += accting->down_secs;
+			total_acct.idle_secs += accting->idle_secs;
+			total_acct.resv_secs += accting->resv_secs;
+			total_acct.over_secs += accting->over_secs;
+			total_acct.cpu_count += accting->cpu_count;
+		}
+		list_iterator_destroy(itr3);
+
+		total_acct.cpu_count /= list_count(cluster->accounting_list);
+		local_total_time = total_time * total_acct.cpu_count;
+		total_reported = total_acct.alloc_secs + total_acct.down_secs 
+			+ total_acct.idle_secs + total_acct.resv_secs;
+		
+		while((field = list_next(itr2))) {
+			switch(field->type) {
+			case PRINT_CLUSTER_NAME:
+				field->print_routine(SLURM_PRINT_VALUE,
+						     field,
+						     cluster->name);		
+				break;
+			case PRINT_CLUSTER_CPUS:
+				field->print_routine(SLURM_PRINT_VALUE,
+						     field,
+						     total_acct.cpu_count);
+				break;
+			case PRINT_CLUSTER_ACPU:
+				field->print_routine(SLURM_PRINT_VALUE,
+						     field,
+						     total_acct.alloc_secs,
+						     total_reported);
+				break;
+			case PRINT_CLUSTER_DCPU:
+				field->print_routine(SLURM_PRINT_VALUE,
+						     field,
+						     total_acct.down_secs,
+						     total_reported);
+				break;
+			case PRINT_CLUSTER_ICPU:
+				field->print_routine(SLURM_PRINT_VALUE,
+						     field,
+						     total_acct.idle_secs,
+						     total_reported);
+				break;
+			case PRINT_CLUSTER_RCPU:
+				field->print_routine(SLURM_PRINT_VALUE,
+						     field,
+						     total_acct.resv_secs,
+						     total_reported);
+				break;
+			case PRINT_CLUSTER_OCPU:
+					field->print_routine(SLURM_PRINT_VALUE,
+						     field,
+						     total_acct.over_secs,
+						     total_reported);
+				break;
+			case PRINT_CLUSTER_TOTAL:
+				field->print_routine(SLURM_PRINT_VALUE,
+						     field,
+						     total_reported,
+						     local_total_time);
+				break;
+			default:
+				break;
+			}
+		}
+		list_iterator_reset(itr2);
+		printf("\n");
+	}
+
+	list_iterator_destroy(itr2);
+	list_iterator_destroy(itr);
+
+end_it:
+	if(cluster_list) {
+		list_destroy(cluster_list);
+		cluster_list = NULL;
+	}
+	
+	if(print_fields_list) {
+		list_destroy(print_fields_list);
+		print_fields_list = NULL;
+	}
+
+	return rc;
+}
+
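cluster_utilization() above averages cpu_count over the accounting records, sums the allocated/down/idle/reserved seconds into the "Reported" figure, and compares Reported against the elapsed time multiplied by the CPU count; sreport_print_time() (in common.c, further below) then renders each bucket as a percentage of the total passed to it. A small worked example with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t alloc = 600000, down = 20000, idle = 340000, resv = 40000;
	uint64_t reported = alloc + down + idle + resv;	/* 1000000 secs  */
	uint64_t cpu_count = 128, elapsed_secs = 86400;
	uint64_t capacity = cpu_count * elapsed_secs;	/* 11059200 secs */

	printf("Allocated: %.2f%% of reported time\n",
	       100.0 * alloc / reported);		/* 60.00%        */
	printf("Reported:  %.2f%% of cluster capacity\n",
	       100.0 * reported / capacity);		/* about 9.04%   */
	return 0;
}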
diff --git a/src/sreport/cluster_reports.h b/src/sreport/cluster_reports.h
new file mode 100644
index 0000000000000000000000000000000000000000..0901fb82bc274653af9c823193bbb77c7381a071
--- /dev/null
+++ b/src/sreport/cluster_reports.h
@@ -0,0 +1,47 @@
+/*****************************************************************************\
+ *  cluster_reports.h - functions for generating cluster reports
+ *                       from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SREPORT_CLUSTER_REPORTS_H
+#define __SREPORT_CLUSTER_REPORTS_H
+
+#include "sreport.h"
+
+extern int cluster_utilization(int argc, char *argv[]);
+
+#endif
diff --git a/src/sreport/common.c b/src/sreport/common.c
new file mode 100644
index 0000000000000000000000000000000000000000..38fcfac371cdc3743fc80131b6ab88b155a7178c
--- /dev/null
+++ b/src/sreport/common.c
@@ -0,0 +1,258 @@
+/*****************************************************************************\
+ *  common.c - common functions for generating reports
+ *             from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "sreport.h"
+
+extern void sreport_print_time(type_t type, print_field_t *field,
+			       uint64_t value, uint64_t total_time)
+{
+	if(!total_time)
+		total_time = 1;
+
+	switch(type) {
+	case SLURM_PRINT_HEADLINE:
+		if(print_fields_parsable_print)
+			printf("%s|", field->name);
+		else
+			printf("%-*.*s ", field->len, field->len, field->name);
+		break;
+	case SLURM_PRINT_UNDERSCORE:
+		if(!print_fields_parsable_print)
+			printf("%-*.*s ", field->len, field->len, 
+			       "---------------------------------------");
+		break;
+	case SLURM_PRINT_VALUE:
+		/* (value == unset)  || (value == cleared) */
+		if((value == NO_VAL) || (value == INFINITE)) {
+			if(print_fields_parsable_print)
+				printf("|");	
+			else				
+				printf("%-*s ", field->len, " ");
+		} else {
+			char *output = NULL;
+			double percent = (double)value;
+
+			switch(time_format) {
+			case SREPORT_TIME_SECS:
+				output = xstrdup_printf("%llu", value);
+				break;
+			case SREPORT_TIME_PERCENT:
+				percent /= total_time;
+				percent *= 100;
+				output = xstrdup_printf("%.2lf%%", percent);
+				break; 
+			case SREPORT_TIME_SECS_PER:
+				percent /= total_time;
+				percent *= 100;
+				output = xstrdup_printf("%llu(%.2lf%%)",
+							value, percent);
+				break;
+			default:
+				output = xstrdup_printf("%llu", value);
+				break;
+			}
+
+			if(print_fields_parsable_print)
+				printf("%s|", output);	
+			else
+				printf("%*s ", field->len, output);
+			xfree(output);
+		}
+		break;
+	default:
+		if(print_fields_parsable_print)
+			printf("%s|", "n/a");
+		else
+			printf("%-*.*s ", field->len, field->len, "n/a");
+		break;
+	}
+}
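+
+/* Illustrative output of sreport_print_time() for value=1800 and
+ * total_time=3600 (hypothetical numbers, shown only as a sketch):
+ *   SREPORT_TIME_SECS      "1800"
+ *   SREPORT_TIME_PERCENT   "50.00%"
+ *   SREPORT_TIME_SECS_PER  "1800(50.00%)"
+ */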
+
+extern int parse_option_end(char *option)
+{
+	int end = 0;
+	
+	if(!option)
+		return 0;
+
+	while(option[end] && option[end] != '=')
+		end++;
+	if(!option[end])
+		return 0;
+	end++;
+	return end;
+}
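+
+/* For example (sketch): parse_option_end("Format=cl,a") returns 7, the
+ * offset of the first character after the '='; 0 means no '=' was found. */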
+
+/* The caller must xfree() the string returned from here */
+extern char *strip_quotes(char *option, int *increased)
+{
+	int end = 0;
+	int i=0, start=0;
+	char *meat = NULL;
+
+	if(!option)
+		return NULL;
+
+	/* first strip off the ("|')'s */
+	if (option[i] == '\"' || option[i] == '\'')
+		i++;
+	start = i;
+
+	while(option[i]) {
+		if(option[i] == '\"' || option[i] == '\'') {
+			end++;
+			break;
+		}
+		i++;
+	}
+	end += i;
+
+	meat = xmalloc((i-start)+1);
+	memcpy(meat, option+start, (i-start));
+
+	if(increased)
+		(*increased) += end;
+
+	return meat;
+}
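+
+/* For example (sketch): strip_quotes("\"debug\"", &inc) returns an
+ * xmalloc'd "debug" and adds 7 to inc, stepping past both quotes. */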
+
+extern void addto_char_list(List char_list, char *names)
+{
+	int i=0, start=0;
+	char *name = NULL, *tmp_char = NULL;
+	ListIterator itr = list_iterator_create(char_list);
+
+	if(names && char_list) {
+		if (names[i] == '\"' || names[i] == '\'')
+			i++;
+		start = i;
+		while(names[i]) {
+			if(names[i] == '\"' || names[i] == '\'')
+				break;
+			else if(names[i] == ',') {
+				if((i-start) > 0) {
+					name = xmalloc((i-start+1));
+					memcpy(name, names+start, (i-start));
+
+					while((tmp_char = list_next(itr))) {
+						if(!strcasecmp(tmp_char, name))
+							break;
+					}
+
+					if(!tmp_char)
+						list_append(char_list, name);
+					else 
+						xfree(name);
+					list_iterator_reset(itr);
+				}
+				i++;
+				start = i;
+			}
+			i++;
+		}
+		if((i-start) > 0) {
+			name = xmalloc((i-start)+1);
+			memcpy(name, names+start, (i-start));
+			while((tmp_char = list_next(itr))) {
+				if(!strcasecmp(tmp_char, name))
+					break;
+			}
+			
+			if(!tmp_char)
+				list_append(char_list, name);
+			else 
+				xfree(name);
+		}
+	}	
+	list_iterator_destroy(itr);
+} 
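+
+/* For example (sketch): addto_char_list(list, "tux,TUX,alpha") appends
+ * "tux" and "alpha"; the second "TUX" is dropped by the case-insensitive
+ * duplicate check above. */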
+
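+/* Fill in any unset start/end time.  A sketch of the default behavior:
+ * with both values unset the window becomes midnight yesterday through
+ * midnight today (local time), and the window is always widened to span
+ * at least one hour. */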
+extern int set_start_end_time(time_t *start, time_t *end)
+{
+	time_t my_time = time(NULL);
+	struct tm start_tm;
+	struct tm end_tm;
+
+	/* Default is going to be the last day */
+	if(!(*end)) {
+		if(!localtime_r(&my_time, &end_tm)) {
+			error("Couldn't get localtime from end %ld",
+			      (long)my_time);
+			return SLURM_ERROR;
+		}
+		end_tm.tm_hour = 0;
+		(*end) = mktime(&end_tm);		
+	} else {
+		if(!localtime_r(end, &end_tm)) {
+			error("Couldn't get localtime from user end %ld",
+			      (long)(*end));
+			return SLURM_ERROR;
+		}
+	}
+	end_tm.tm_sec = 0;
+	end_tm.tm_min = 0;
+	end_tm.tm_isdst = -1;
+	(*end) = mktime(&end_tm);		
+
+	if(!(*start)) {
+		if(!localtime_r(&my_time, &start_tm)) {
+			error("Couldn't get localtime from start %ld",
+			      (long)my_time);
+			return SLURM_ERROR;
+		}
+		start_tm.tm_hour = 0;
+		start_tm.tm_mday--;
+		(*start) = mktime(&start_tm);		
+	} else {
+		if(!localtime_r(start, &start_tm)) {
+			error("Couldn't get localtime from user start %ld",
+			      (long)(*start));
+			return SLURM_ERROR;
+		}
+	}
+	start_tm.tm_sec = 0;
+	start_tm.tm_min = 0;
+	start_tm.tm_isdst = -1;
+	(*start) = mktime(&start_tm);		
+
+	if((*end)-(*start) < 3600) 
+		(*end) = (*start) + 3600;
+
+	return SLURM_SUCCESS;
+}
diff --git a/src/sreport/job_reports.c b/src/sreport/job_reports.c
new file mode 100644
index 0000000000000000000000000000000000000000..ff249fac08652a8b8d9a4d96a8a10ad2740ef8e4
--- /dev/null
+++ b/src/sreport/job_reports.c
@@ -0,0 +1,677 @@
+/*****************************************************************************\
+ *  job_reports.c - functions for generating job reports
+ *                     from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "job_reports.h"
+
+typedef struct {
+	List jobs; /* This list uses a NULL destroy function since we just
+		    * store pointers to existing jobacct_job_rec_t entries
+		    * here and do not allocate any new memory */
+	uint32_t min_size; /* smallest job size (in cpus) in this bucket;
+			    * 0 if this is the first bucket */
+	uint32_t max_size; /* largest job size (in cpus) in this bucket;
+			    * INFINITE if this is the last bucket */
+	uint32_t count; /* count of jobs */
+	uint64_t cpu_secs; /* how many cpu secs are taken up by this
+			    * grouping */
+} local_grouping_t;
+
+typedef struct {
+	char *acct; /* account name */
+	uint64_t cpu_secs; /* how many cpu secs are taken up by this
+			    * acct */
+	List groups; /* list of local_grouping_t's, one per size bucket */
+	uint32_t lft;
+	uint32_t rgt;
+} acct_grouping_t;
+
+typedef struct {
+	char *cluster; /* cluster name */
+	uint64_t cpu_secs; /* how many cpu secs are taken up by this
+			    * cluster */
+	List acct_list; /* containing acct_grouping_t's */
+} cluster_grouping_t;
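+
+/* A sketch of how the report data is grouped (three nested levels):
+ *   cluster_grouping_t          one record per cluster
+ *     -> acct_grouping_t        one record per top-level account
+ *          -> local_grouping_t  one record per job-size bucket
+ * cpu_secs is accumulated at every level as jobs are binned below. */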
+
+enum {
+	PRINT_JOB_ACCOUNT,
+	PRINT_JOB_CLUSTER,
+	PRINT_JOB_COUNT,
+	PRINT_JOB_CPUS,
+	PRINT_JOB_DUR,
+	PRINT_JOB_NODES,
+	PRINT_JOB_SIZE,
+	PRINT_JOB_USER
+};
+
+static List print_fields_list = NULL; /* types are of print_field_t */
+static List grouping_print_fields_list = NULL; /* types are of print_field_t */
+
+static void _destroy_local_grouping(void *object)
+{
+	local_grouping_t *local_grouping = (local_grouping_t *)object;
+	if(local_grouping) {
+		list_destroy(local_grouping->jobs);
+		xfree(local_grouping);
+	}
+}
+
+static void _destroy_acct_grouping(void *object)
+{
+	acct_grouping_t *acct_grouping = (acct_grouping_t *)object;
+	if(acct_grouping) {
+		xfree(acct_grouping->acct);
+		if(acct_grouping->groups)
+			list_destroy(acct_grouping->groups);
+		xfree(acct_grouping);
+	}
+}
+
+static void _destroy_cluster_grouping(void *object)
+{
+	cluster_grouping_t *cluster_grouping = (cluster_grouping_t *)object;
+	if(cluster_grouping) {
+		xfree(cluster_grouping->cluster);
+		if(cluster_grouping->acct_list)
+			list_destroy(cluster_grouping->acct_list);
+		xfree(cluster_grouping);
+	}
+}
+
+static int _set_cond(int *start, int argc, char *argv[],
+		     acct_job_cond_t *job_cond,
+		     List format_list, List grouping_list)
+{
+	int i;
+	int set = 0;
+	int end = 0;
+	int local_cluster_flag = all_clusters_flag;
+
+	for (i=(*start); i<argc; i++) {
+		end = parse_option_end(argv[i]);
+		if (strncasecmp (argv[i], "Set", 3) == 0) {
+			i--;
+			break;
+		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
+			continue;
+		} else if(!end && !strncasecmp(argv[i], "all_clusters", 1)) {
+			local_cluster_flag = 1;
+			continue;
+		} else if(!end) {
+			addto_char_list(job_cond->cluster_list, argv[i]);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Accounts", 2) == 0) {
+			addto_char_list(job_cond->acct_list,
+					argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Associations", 2) == 0) {
+			addto_char_list(job_cond->associd_list,
+					argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Clusters", 1) == 0) {
+			addto_char_list(job_cond->cluster_list,
+					argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "End", 1) == 0) {
+			job_cond->usage_end = parse_time(argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Format", 1) == 0) {
+			if(format_list)
+				addto_char_list(format_list, argv[i]+end);
+		} else if (strncasecmp (argv[i], "Gid", 2) == 0) {
+			addto_char_list(job_cond->groupid_list,
+					argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "grouping", 2) == 0) {
+			if(grouping_list)
+				addto_char_list(grouping_list, argv[i]+end);
+		} else if (strncasecmp (argv[i], "Jobs", 1) == 0) {
+			char *end_char = NULL, *start_char = argv[i]+end;
+			jobacct_selected_step_t *selected_step = NULL;
+			char *dot = NULL;
+
+			while ((end_char = strstr(start_char, ",")) 
+			       && start_char) {
+				*end_char = 0;
+				while (isspace(*start_char))
+					start_char++;	/* discard whitespace */
+				if(!(int)*start_char)
+					continue;
+				selected_step = xmalloc(
+					sizeof(jobacct_selected_step_t));
+				list_append(job_cond->step_list, selected_step);
+				
+				dot = strstr(start_char, ".");
+				if (dot == NULL) {
+					debug2("No jobstep requested");
+					selected_step->step = NULL;
+					selected_step->stepid = 
+						(uint32_t)NO_VAL;
+				} else {
+					*dot++ = 0;
+					selected_step->step = xstrdup(dot);
+					selected_step->stepid = atoi(dot);
+				}
+				selected_step->job = xstrdup(start_char);
+				selected_step->jobid = atoi(start_char);
+				start_char = end_char + 1;
+			}
+			
+			set = 1;
+		} else if (strncasecmp (argv[i], "Partitions", 1) == 0) {
+			addto_char_list(job_cond->partition_list,
+					argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Start", 1) == 0) {
+			job_cond->usage_start = parse_time(argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Users", 1) == 0) {
+			addto_char_list(job_cond->user_list,
+					argv[i]+end);
+			set = 1;
+		} else {
+			printf(" Unknown condition: %s\n"
+			       "Use keyword set to modify value\n", argv[i]);
+		}
+	}
+	(*start) = i;
+
+	if(!local_cluster_flag && !list_count(job_cond->cluster_list)) {
+		char *temp = slurm_get_cluster_name();
+		if(temp)
+			list_append(job_cond->cluster_list, temp);
+	}
+
+	set_start_end_time((time_t *)&job_cond->usage_start,
+			   (time_t *)&job_cond->usage_end);
+
+	return set;
+}
+
+
+static int _setup_print_fields_list(List format_list)
+{
+	ListIterator itr = NULL;
+	print_field_t *field = NULL;
+	char *object = NULL;
+
+	if(!format_list || !list_count(format_list)) {
+		printf(" error: we need a format list to set up the print.\n");
+		return SLURM_ERROR;
+	}
+
+	if(!print_fields_list)
+		print_fields_list = list_create(destroy_print_field);
+
+	itr = list_iterator_create(format_list);
+	while((object = list_next(itr))) {
+		field = xmalloc(sizeof(print_field_t));
+		if(!strncasecmp("Account", object, 1)) {
+			field->type = PRINT_JOB_ACCOUNT;
+			field->name = xstrdup("Account");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Cluster", object, 2)) {
+			field->type = PRINT_JOB_CLUSTER;
+			field->name = xstrdup("Cluster");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Count", object, 2)) {
+			field->type = PRINT_JOB_COUNT;
+			field->name = xstrdup("Job Count");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("cpu_count", object, 2)) {
+			field->type = PRINT_JOB_CPUS;
+			field->name = xstrdup("CPU Count");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("Duration", object, 1)) {
+			field->type = PRINT_JOB_DUR;
+			field->name = xstrdup("Duration");
+			field->len = 12;
+			field->print_routine = print_fields_time;
+		} else if(!strncasecmp("node_count", object, 2)) {
+			field->type = PRINT_JOB_NODES;
+			field->name = xstrdup("Node Count");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("User", object, 1)) {
+			field->type = PRINT_JOB_USER;
+			field->name = xstrdup("User");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else {
+			printf("Unknown field '%s'\n", object);
+			xfree(field);
+			continue;
+		}
+		list_append(print_fields_list, field);		
+	}
+	list_iterator_destroy(itr);
+
+	return SLURM_SUCCESS;
+}
+
+static int _setup_grouping_print_fields_list(List grouping_list)
+{
+	ListIterator itr = NULL;
+	print_field_t *field = NULL;
+	char *object = NULL;
+	uint32_t last_size = 0;
+	uint32_t size = 0;
+
+	if(!grouping_list || !list_count(grouping_list)) {
+		printf(" error: we need a grouping list to "
+		       "set up the print.\n");
+		return SLURM_ERROR;
+	}
+
+	if(!grouping_print_fields_list)
+		grouping_print_fields_list = list_create(destroy_print_field);
+
+	itr = list_iterator_create(grouping_list);
+	while((object = list_next(itr))) {
+		field = xmalloc(sizeof(print_field_t));
+		size = atoi(object);
+
+		field->type = PRINT_JOB_SIZE;
+		field->name = xstrdup_printf("%u-%u cpus", last_size, size-1);
+		field->len = 13;
+		field->print_routine = sreport_print_time;
+		last_size = size;
+		list_append(grouping_print_fields_list, field);		
+	}
+	list_iterator_destroy(itr);
+
+	if(last_size) {
+		field = xmalloc(sizeof(print_field_t));
+		field->type = PRINT_JOB_SIZE;
+		field->name = xstrdup_printf("> %u cpus", last_size);
+		field->len = 13;
+		field->print_routine = sreport_print_time;
+		list_append(grouping_print_fields_list, field);		
+	}
+
+	return SLURM_SUCCESS;
+}
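+
+/* For example (sketch): the default grouping "50,250,500,1000" used below
+ * produces the columns "0-49 cpus", "50-249 cpus", "250-499 cpus",
+ * "500-999 cpus" and "> 1000 cpus". */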
+
+extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
+{
+	int rc = SLURM_SUCCESS;
+	acct_job_cond_t *job_cond = xmalloc(sizeof(acct_job_cond_t));
+	acct_association_cond_t assoc_cond;
+	acct_association_rec_t *assoc = NULL;
+	int i=0;
+
+	ListIterator itr = NULL;
+	ListIterator itr2 = NULL;
+	ListIterator cluster_itr = NULL;
+	ListIterator local_itr = NULL;
+	ListIterator acct_itr = NULL;
+	ListIterator group_itr = NULL;
+	
+
+	jobacct_job_rec_t *job = NULL;
+	cluster_grouping_t *cluster_group = NULL;
+	acct_grouping_t *acct_group = NULL;
+	local_grouping_t *local_group = NULL;
+
+	print_field_t *field = NULL;
+	print_field_t total_field;
+	uint32_t total_time = 0;
+	List job_list = NULL;
+	List cluster_list = NULL;
+	List assoc_list = NULL;
+
+	List format_list = list_create(slurm_destroy_char);
+	List grouping_list = list_create(slurm_destroy_char);
+
+	List header_list = list_create(NULL);
+
+//	sreport_time_format_t temp_time_format = time_format;
+
+	print_fields_list = list_create(destroy_print_field);
+
+	job_cond->acct_list = list_create(slurm_destroy_char);
+	job_cond->associd_list = list_create(slurm_destroy_char);
+	job_cond->cluster_list = list_create(slurm_destroy_char);
+	job_cond->groupid_list = list_create(slurm_destroy_char);
+	job_cond->partition_list = list_create(slurm_destroy_char);
+	job_cond->step_list = list_create(slurm_destroy_char);
+
+	_set_cond(&i, argc, argv, job_cond, NULL, grouping_list);
+
+	addto_char_list(format_list, "Cl,a");
+
+	if(!list_count(grouping_list)) 
+		addto_char_list(grouping_list, "50,250,500,1000");
+	
+	_setup_print_fields_list(format_list);
+	list_destroy(format_list);
+
+	_setup_grouping_print_fields_list(grouping_list);
+
+	job_list = jobacct_storage_g_get_jobs_cond(db_conn, job_cond);
+	if(!job_list) {
+		printf(" Problem with job query.\n");
+		goto end_it;
+	}
+
+	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
+	assoc_cond.acct_list = job_cond->acct_list;
+	assoc_cond.id_list = job_cond->associd_list;
+	assoc_cond.cluster_list = job_cond->cluster_list;
+	assoc_cond.partition_list = job_cond->partition_list;
+	assoc_cond.parent_acct = "root";
+
+	assoc_list = acct_storage_g_get_associations(db_conn, &assoc_cond);
+	
+	if(print_fields_have_header) {
+		char start_char[20];
+		char end_char[20];
+		time_t my_end = job_cond->usage_end-1;
+
+		slurm_make_time_str((time_t *)&job_cond->usage_start, 
+				    start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end,
+				    end_char, sizeof(end_char));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+		printf("Job Sizes %s - %s (%d secs)\n", 
+		       start_char, end_char, 
+		       (job_cond->usage_end - job_cond->usage_start));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+	}
+	total_time = job_cond->usage_end - job_cond->usage_start;
+
+	cluster_list = list_create(_destroy_cluster_grouping);
+
+	cluster_itr = list_iterator_create(cluster_list);
+	group_itr = list_iterator_create(grouping_list);
+
+	if(!assoc_list) {
+		debug2(" No assoc list given.\n");
+		goto no_assocs;
+	}
+
+	itr = list_iterator_create(assoc_list);
+	while((assoc = list_next(itr))) {
+		while((cluster_group = list_next(cluster_itr))) {
+			if(!strcmp(assoc->cluster, cluster_group->cluster)) 
+				break;
+		}
+		if(!cluster_group) {
+			cluster_group = 
+				xmalloc(sizeof(cluster_grouping_t));
+			cluster_group->cluster = xstrdup(assoc->cluster);
+			cluster_group->acct_list =
+				list_create(_destroy_acct_grouping);
+			list_append(cluster_list, cluster_group);
+		}
+
+		acct_itr = list_iterator_create(cluster_group->acct_list);
+		while((acct_group = list_next(acct_itr))) {
+			if(!strcmp(assoc->acct, acct_group->acct))
+				break;
+		}
+		list_iterator_destroy(acct_itr);		
+			
+		if(!acct_group) {
+			uint32_t last_size = 0;
+			char *group = NULL;
+			acct_group = xmalloc(sizeof(acct_grouping_t));
+			acct_group->acct = xstrdup(assoc->acct);
+			acct_group->lft = assoc->lft;
+			acct_group->rgt = assoc->rgt;
+			acct_group->groups =
+				list_create(_destroy_local_grouping);
+			list_append(cluster_group->acct_list, acct_group);
+			while((group = list_next(group_itr))) {
+				local_group = xmalloc(sizeof(local_grouping_t));
+				local_group->jobs = list_create(NULL);
+				local_group->min_size = last_size;
+				last_size = atoi(group);
+				local_group->max_size = last_size-1;
+				list_append(acct_group->groups, local_group);
+			}
+			if(last_size) {
+				local_group = xmalloc(sizeof(local_grouping_t));
+				local_group->jobs = list_create(NULL);
+				local_group->min_size = last_size;
+				local_group->max_size = INFINITE;
+				list_append(acct_group->groups, local_group);
+			}
+			list_iterator_reset(group_itr);
+		}
+		list_iterator_reset(cluster_itr);
+	}
+	list_iterator_destroy(itr);
+no_assocs:
+	itr = list_iterator_create(job_list);
+
+	list_append_list(header_list, print_fields_list);
+	list_append_list(header_list, grouping_print_fields_list);
+
+	memset(&total_field, 0, sizeof(print_field_t));
+	total_field.type = PRINT_JOB_SIZE;
+	total_field.name = xstrdup("% of Cluster");
+	total_field.len = 12;
+	total_field.print_routine = sreport_print_time;
+	list_append(header_list, &total_field);
+
+	print_fields_header(header_list);
+	list_destroy(header_list);
+
+	while((job = list_next(itr))) {
+		char *local_cluster = "UNKNOWN";
+		char *local_account = "UNKNOWN";
+		char *group;
+
+		if(!job->elapsed) {
+			/* ignore jobs that never actually ran */
+			continue;
+		}
+		if(job->cluster) 
+			local_cluster = job->cluster;
+		if(job->account) 
+			local_account = job->account;
+
+		while((cluster_group = list_next(cluster_itr))) {
+			if(!strcmp(local_cluster, cluster_group->cluster)) 
+				break;
+		}
+		if(!cluster_group) {
+			cluster_group = 
+				xmalloc(sizeof(cluster_grouping_t));
+			cluster_group->cluster = xstrdup(local_cluster);
+			cluster_group->acct_list =
+				list_create(_destroy_acct_grouping);
+			list_append(cluster_list, cluster_group);
+		}
+
+		acct_itr = list_iterator_create(cluster_group->acct_list);
+		while((acct_group = list_next(acct_itr))) {
+			if(acct_group->lft != (uint32_t)NO_VAL
+			   && job->lft != (uint32_t)NO_VAL) {
+				/* keep separate since we don't want
+				 * to do a strcmp if we don't have to
+				 */
+				if(job->lft > acct_group->lft 
+				   && job->lft < acct_group->rgt)
+					break;
+			} else if(!strcmp(acct_group->acct, local_account))
+				break;
+		}
+		list_iterator_destroy(acct_itr);		
+			
+		if(!acct_group) {
+			uint32_t last_size = 0;
+			acct_group = xmalloc(sizeof(acct_grouping_t));
+			acct_group->acct = xstrdup(local_account);
+			acct_group->groups =
+				list_create(_destroy_local_grouping);
+			list_append(cluster_group->acct_list, acct_group);
+
+			while((group = list_next(group_itr))) {
+				local_group = xmalloc(sizeof(local_grouping_t));
+				local_group->jobs = list_create(NULL);
+				local_group->min_size = last_size;
+				last_size = atoi(group);
+				local_group->max_size = last_size-1;
+				list_append(acct_group->groups, local_group);
+			}
+			if(last_size) {
+				local_group = xmalloc(sizeof(local_grouping_t));
+				local_group->jobs = list_create(NULL);
+				local_group->min_size = last_size;
+				local_group->max_size = INFINITE;
+				list_append(acct_group->groups, local_group);
+			}
+			list_iterator_reset(group_itr);
+		}
+
+		local_itr = list_iterator_create(acct_group->groups);
+		while((local_group = list_next(local_itr))) {
+			uint64_t total_secs = 0;
+			if((job->alloc_cpus < local_group->min_size)
+			   || (job->alloc_cpus > local_group->max_size))
+				continue;
+			list_append(local_group->jobs, job);
+			local_group->count++;
+			total_secs = job->elapsed*job->alloc_cpus;
+			local_group->cpu_secs += total_secs;
+			acct_group->cpu_secs += total_secs;
+			cluster_group->cpu_secs += total_secs;
+		}
+		list_iterator_destroy(local_itr);		
+
+		list_iterator_reset(cluster_itr);
+	}
+	list_iterator_destroy(group_itr);
+	list_destroy(grouping_list);
+	list_iterator_destroy(itr);
+	
+//	time_format = SREPORT_TIME_PERCENT;
+	
+	itr = list_iterator_create(print_fields_list);
+	itr2 = list_iterator_create(grouping_print_fields_list);
+	while((cluster_group = list_next(cluster_itr))) {
+		acct_itr = list_iterator_create(cluster_group->acct_list);
+		while((acct_group = list_next(acct_itr))) {
+			while((field = list_next(itr))) {
+				switch(field->type) {
+				case PRINT_JOB_CLUSTER:
+					field->print_routine(
+						SLURM_PRINT_VALUE,
+						field,
+						cluster_group->cluster);
+					break;
+				case PRINT_JOB_ACCOUNT:
+					field->print_routine(SLURM_PRINT_VALUE,
+							     field,
+							     acct_group->acct);
+					break;
+				default:
+					break;
+				}
+			}
+			list_iterator_reset(itr);
+			local_itr = list_iterator_create(acct_group->groups);
+			while((local_group = list_next(local_itr))) {
+				field = list_next(itr2);
+				switch(field->type) {
+				case PRINT_JOB_SIZE:
+					field->print_routine(
+						SLURM_PRINT_VALUE,
+						field,
+						local_group->cpu_secs,
+						acct_group->cpu_secs);
+					break;
+				default:
+					break;
+				}
+			}
+			list_iterator_reset(itr2);
+			list_iterator_destroy(local_itr);
+			total_field.print_routine(SLURM_PRINT_VALUE,
+						  &total_field,
+						  acct_group->cpu_secs,
+						  cluster_group->cpu_secs);
+			
+			printf("\n");
+		}
+		list_iterator_destroy(acct_itr);
+	}
+	list_iterator_destroy(itr);
+
+//	time_format = temp_time_format;
+
+end_it:
+
+	destroy_acct_job_cond(job_cond);
+	
+	if(job_list) {
+		list_destroy(job_list);
+		job_list = NULL;
+	}
+	
+	if(assoc_list) {
+		list_destroy(assoc_list);
+		assoc_list = NULL;
+	}
+	
+	if(cluster_list) {
+		list_destroy(cluster_list);
+		cluster_list = NULL;
+	}
+	
+	if(print_fields_list) {
+		list_destroy(print_fields_list);
+		print_fields_list = NULL;
+	}
+
+	if(grouping_print_fields_list) {
+		list_destroy(grouping_print_fields_list);
+		grouping_print_fields_list = NULL;
+	}
+
+	return rc;
+}
+
diff --git a/src/sreport/job_reports.h b/src/sreport/job_reports.h
new file mode 100644
index 0000000000000000000000000000000000000000..f4641967c9d4a3271b4436f9572b99afd30e18c9
--- /dev/null
+++ b/src/sreport/job_reports.h
@@ -0,0 +1,47 @@
+/*****************************************************************************\
+ *  job_reports.h - functions for generating job reports
+ *                     from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SREPORT_JOB_REPORTS_H
+#define __SREPORT_JOB_REPORTS_H
+
+#include "sreport.h"
+
+extern int job_sizes_grouped_by_top_acct(int argc, char *argv[]);
+
+#endif
diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c
index c3dfb8223eaf579769e4c88352147e9bd382ddf9..a348dbb59fa4a01034ed461e69b4a11d8a6aef4f 100644
--- a/src/sreport/sreport.c
+++ b/src/sreport/sreport.c
@@ -37,6 +37,10 @@
 \*****************************************************************************/
 
 #include "src/sreport/sreport.h"
+#include "src/sreport/assoc_reports.h"
+#include "src/sreport/cluster_reports.h"
+#include "src/sreport/job_reports.h"
+#include "src/sreport/user_reports.h"
 #include "src/common/xsignal.h"
 
 #define OPT_LONG_HIDE   0x102
@@ -46,16 +50,20 @@ char *command_name;
 int exit_code;		/* sreport's exit code, =1 on any error at any time */
 int exit_flag;		/* program to terminate if =1 */
 int input_words;	/* number of words of input permitted */
-int one_liner;		/* one record per line if =1 */
 int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
-int rollback_flag;       /* immediate execute=1, else = 0 */
-int with_assoc_flag = 0;
+int all_clusters_flag = 0;
+sreport_time_format_t time_format = SREPORT_TIME_SECS;
 void *db_conn = NULL;
 uint32_t my_uid = 0;
 
+static void	_job_rep (int argc, char *argv[]);
+static void	_user_rep (int argc, char *argv[]);
+static void	_cluster_rep (int argc, char *argv[]);
+static void	_assoc_rep (int argc, char *argv[]);
 static int	_get_command (int *argc, char *argv[]);
 static void     _print_version( void );
 static int	_process_command (int argc, char *argv[]);
+static int      _set_time_format(char *format);
 static void	_usage ();
 
 int 
@@ -67,9 +75,9 @@ main (int argc, char *argv[])
 
 	int option_index;
 	static struct option long_options[] = {
+		{"all_clusters", 0, 0, 'a'},
 		{"help",     0, 0, 'h'},
 		{"immediate",0, 0, 'i'},
-		{"oneliner", 0, 0, 'o'},
 		{"no_header", 0, 0, 'n'},
 		{"parsable", 0, 0, 'p'},
 		{"quiet",    0, 0, 'q'},
@@ -80,18 +88,17 @@ main (int argc, char *argv[])
 	};
 
 	command_name      = argv[0];
-	rollback_flag     = 1;
 	exit_code         = 0;
 	exit_flag         = 0;
 	input_field_count = 0;
 	quiet_flag        = 0;
-	log_init("sacctmgr", opts, SYSLOG_FACILITY_DAEMON, NULL);
+	log_init("sreport", opts, SYSLOG_FACILITY_DAEMON, NULL);
 
-	while((opt_char = getopt_long(argc, argv, "hionpqsvV",
+	while((opt_char = getopt_long(argc, argv, "ahnpqt:vV",
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
-			fprintf(stderr, "Try \"sacctmgr --help\" "
+			fprintf(stderr, "Try \"sreport --help\" "
 				"for more information\n");
 			exit(1);
 			break;
@@ -99,23 +106,20 @@ main (int argc, char *argv[])
 			_usage ();
 			exit(exit_code);
 			break;
-		case (int)'i':
-			rollback_flag = 0;
+		case (int)'a':
+			all_clusters_flag = 1;
 			break;
-		case (int)'o':
-			one_liner = 1;
+		case (int)'n':
+			print_fields_have_header = 0;
+			break;
+		case (int)'p':
+			print_fields_parsable_print = 1;
 			break;
-/* 		case (int)'n': */
-/* 			have_header = 0; */
-/* 			break; */
-/* 		case (int)'p': */
-/* 			parsable_print = 1; */
-/* 			break; */
 		case (int)'q':
 			quiet_flag = 1;
 			break;
-		case (int)'s':
-			with_assoc_flag = 1;
+		case (int)'t':
+			_set_time_format(optarg);
 			break;
 		case (int)'v':
 			quiet_flag = -1;
@@ -143,7 +147,7 @@ main (int argc, char *argv[])
 		}	
 	}
 
-	db_conn = acct_storage_g_get_connection(false, rollback_flag);
+	db_conn = acct_storage_g_get_connection(false, false);
 	my_uid = getuid();
 
 	if (input_field_count)
@@ -160,7 +164,6 @@ main (int argc, char *argv[])
 
 	acct_storage_g_close_connection(&db_conn);
 	slurm_acct_storage_fini();
-	printf("\n");
 	exit(exit_code);
 }
 
@@ -187,6 +190,91 @@ getline(const char *prompt)
 }
 #endif
 
+/* 
+ * _job_rep - Reports having to do with jobs 
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ */
+static void _job_rep (int argc, char *argv[]) 
+{
+	int error_code = SLURM_SUCCESS;
+
+	/* First identify which report was requested */
+	if (strncasecmp (argv[0], "Sizes", 1) == 0) {
+		error_code = job_sizes_grouped_by_top_acct(
+			(argc - 1), &argv[1]);
+	} else {
+		exit_code = 1;
+		fprintf(stderr, "Not valid report %s\n", argv[0]);
+		fprintf(stderr, "Valid job reports are, ");
+		fprintf(stderr, "\"Sizes\"\n");
+	}
+	
+	if (error_code) {
+		exit_code = 1;
+	}
+}
+
+/* 
+ * _user_rep - Reports having to do with users
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ */
+static void _user_rep (int argc, char *argv[]) 
+{
+	int error_code = SLURM_SUCCESS;
+
+	if (strncasecmp (argv[0], "Top", 1) == 0) {
+		error_code = user_top((argc - 1), &argv[1]);
+	} else {
+		exit_code = 1;
+		fprintf(stderr, "Not valid report %s\n", argv[0]);
+		fprintf(stderr, "Valid user reports are, ");
+		fprintf(stderr, "\"Top\"\n");
+	}	
+	
+	if (error_code) {
+		exit_code = 1;
+	}
+}
+
+/* 
+ * _cluster_rep - Reports having to do with clusters
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ */
+static void _cluster_rep (int argc, char *argv[]) 
+{
+	int error_code = SLURM_SUCCESS;
+
+	if (strncasecmp (argv[0], "Utilization", 1) == 0) {
+		error_code = cluster_utilization((argc - 1), &argv[1]);
+	} else {
+		exit_code = 1;
+		fprintf(stderr, "Not valid report %s\n", argv[0]);
+		fprintf(stderr, "Valid cluster reports are, ");
+		fprintf(stderr, "\"Utilization\"\n");
+	}
+	
+	if (error_code) {
+		exit_code = 1;
+	}
+}
+
+/* 
+ * _assoc_rep - Reports having to do with associations
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ */
+static void _assoc_rep (int argc, char *argv[]) 
+{
+	int error_code = SLURM_SUCCESS;
+
+	if (error_code) {
+		exit_code = 1;
+	}
+}
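+
+/* Illustrative interactive commands dispatched by the handlers above
+ * (a sketch only; option values are placeholders, not real data):
+ *   cluster utilization
+ *   job sizes grouping=50,250,500,1000 accounts=physics
+ *   user top users=alice,bob start=<time> end=<time>
+ */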
+
 /*
  * _get_command - get a command from the user
  * OUT argc - location to store count of arguments
@@ -287,6 +375,24 @@ _process_command (int argc, char *argv[])
 		exit_code = 1;
 		if (quiet_flag == -1)
 			fprintf(stderr, "no input");
+	} else if ((strncasecmp (argv[0], "association", 1) == 0)) {
+		if (argc < 2) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				fprintf(stderr, 
+				        "too few arguments for keyword:%s\n", 
+				        argv[0]);
+		} else 
+			_assoc_rep((argc - 1), &argv[1]);
+	} else if ((strncasecmp (argv[0], "cluster", 2) == 0)) {
+		if (argc < 2) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				fprintf(stderr, 
+				        "too few arguments for keyword:%s\n", 
+				        argv[0]);
+		} else 
+			_cluster_rep((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "help", 2) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
@@ -295,14 +401,15 @@ _process_command (int argc, char *argv[])
 				 argv[0]);
 		}
 		_usage ();
-	} else if (strncasecmp (argv[0], "oneliner", 1) == 0) {
-		if (argc > 1) {
+	} else if ((strncasecmp (argv[0], "job", 1) == 0)) {
+		if (argc < 2) {
 			exit_code = 1;
-			fprintf (stderr, 
-				 "too many arguments for keyword:%s\n",
-				 argv[0]);
-		}
-		one_liner = 1;
+			if (quiet_flag != 1)
+				fprintf(stderr, 
+				        "too few arguments for keyword:%s\n", 
+				        argv[0]);
+		} else 
+			_job_rep((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "quiet", 4) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
@@ -310,7 +417,8 @@ _process_command (int argc, char *argv[])
 				 argv[0]);
 		}
 		quiet_flag = 1;
-	} else if ((strncasecmp (argv[0], "exit", 4) == 0) ||
+	} else if ((strncasecmp (argv[0], "exit", 1) == 0) ||
+		   (strncasecmp (argv[0], "\\q", 2) == 0) ||
 		   (strncasecmp (argv[0], "quit", 4) == 0)) {
 		if (argc > 1) {
 			exit_code = 1;
@@ -319,6 +427,14 @@ _process_command (int argc, char *argv[])
 				 argv[0]);
 		}
 		exit_flag = 1;
+	} else if (strncasecmp (argv[0], "time", 1) == 0) {
+		if (argc < 2) {
+			exit_code = 1;
+			fprintf (stderr,
+				 "too few arguments for keyword:%s\n",
+				 argv[0]);
+		} else		
+			_set_time_format(argv[1]);
 	} else if (strncasecmp (argv[0], "verbose", 4) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
@@ -335,6 +451,15 @@ _process_command (int argc, char *argv[])
 				 argv[0]);
 		}		
 		_print_version();
+	} else if ((strncasecmp (argv[0], "user", 1) == 0)) {
+		if (argc < 2) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				fprintf(stderr, 
+				        "too few arguments for keyword:%s\n", 
+				        argv[0]);
+		} else 
+			_user_rep((argc - 1), &argv[1]);
 	} else {
 		exit_code = 1;
 		fprintf (stderr, "invalid keyword: %s\n", argv[0]);
@@ -343,6 +468,23 @@ _process_command (int argc, char *argv[])
 	return 0;
 }
 
+static int _set_time_format(char *format)
+{
+	if (strncasecmp (format, "SecPer", 6) == 0) {
+		time_format = SREPORT_TIME_SECS_PER;
+	} else if (strncasecmp (format, "Sec", 1) == 0) {
+		time_format = SREPORT_TIME_SECS;
+	} else if (strncasecmp (format, "Percent", 1) == 0) {
+		time_format = SREPORT_TIME_PERCENT;
+	} else {
+		fprintf (stderr, "unknown time format %s", format);	
+		return SLURM_ERROR;
+	}
+
+	return SLURM_SUCCESS;
+}
+
+
 /* _usage - show the valid sreport commands */
 void _usage () {
 	printf ("\
@@ -373,7 +515,7 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
      !!                       Repeat the last command entered.             \n\
                                                                            \n\
                                                                            \n\
-  All commands entitys, and options are case-insensitive.               \n\n");
+  All commands, entities, and options are case-insensitive.              \n\n");
 	
 }
 
diff --git a/src/sreport/sreport.h b/src/sreport/sreport.h
index 034f03e5cd37c11cf96bf3ee69cecb355b862824..2633a6cf378dbeb978ca51f3ea89d8c1665413c0 100644
--- a/src/sreport/sreport.h
+++ b/src/sreport/sreport.h
@@ -81,8 +81,32 @@
 #include "src/common/parse_time.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/xstring.h"
+#include "src/common/print_fields.h"
 
 #define CKPT_WAIT	10
 #define	MAX_INPUT_FIELDS 128
 
+typedef enum {
+	SREPORT_TIME_SECS,
+	SREPORT_TIME_PERCENT,
+	SREPORT_TIME_SECS_PER,
+} sreport_time_format_t;
+
+extern sreport_time_format_t time_format;
+extern char *command_name;
+extern int exit_code;	/* sreport's exit code, =1 on any error at any time */
+extern int exit_flag;	/* program to terminate if =1 */
+extern int input_words;	/* number of words of input permitted */
+extern int quiet_flag;	/* quiet=1, verbose=-1, normal=0 */
+extern void *db_conn;
+extern uint32_t my_uid;
+extern int all_clusters_flag;
+
+extern void sreport_print_time(type_t type, print_field_t *field,
+			       uint64_t value, uint64_t total_time);
+extern int parse_option_end(char *option);
+extern char *strip_quotes(char *option, int *increased);
+extern void addto_char_list(List char_list, char *names);
+extern int set_start_end_time(time_t *start, time_t *end);
+
 #endif /* HAVE_SREPORT_H */
diff --git a/src/sreport/user_reports.c b/src/sreport/user_reports.c
new file mode 100644
index 0000000000000000000000000000000000000000..d83b9d886500cfa76923b256f0396e1fc8a42fac
--- /dev/null
+++ b/src/sreport/user_reports.c
@@ -0,0 +1,507 @@
+/*****************************************************************************\
+ *  user_reports.c - functions for generating user reports
+ *                     from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "user_reports.h"
+
+enum {
+	PRINT_USER_ACCT,
+	PRINT_USER_CLUSTER,
+	PRINT_USER_LOGIN,
+	PRINT_USER_PROPER,
+	PRINT_USER_USED
+};
+
+
+typedef struct {
+	List acct_list; /* list of char *'s */
+	uint64_t cpu_secs;
+	char *name;
+} local_user_rec_t;
+
+typedef struct {
+	uint64_t cpu_secs;
+	char *name;
+	List user_list; /* list of local_user_rec_t *'s */
+} local_cluster_rec_t;
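+
+/* Sketch of the layout built below: one local_cluster_rec_t per cluster,
+ * each holding local_user_rec_t entries that accumulate cpu_secs from the
+ * association accounting records and remember which accounts were used. */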
+
+static List print_fields_list = NULL; /* types are of print_field_t */
+static bool group_accts = false;
+static int top_limit = 10;
+
+static void _destroy_local_user_rec(void *object)
+{
+	local_user_rec_t *local_user = (local_user_rec_t *)object;
+	if(local_user) {
+		if(local_user->acct_list)
+			list_destroy(local_user->acct_list);
+		xfree(local_user);
+	}
+}
+
+static void _destroy_local_cluster_rec(void *object)
+{
+	local_cluster_rec_t *local_cluster = (local_cluster_rec_t *)object;
+	if(local_cluster) {
+		xfree(local_cluster->name);
+		if(local_cluster->user_list)
+			list_destroy(local_cluster->user_list);
+		xfree(local_cluster);
+	}
+}
+
+/* 
+ * Comparator used to sort users from largest to smallest cpu usage
+ * 
+ * returns: -1: user_a > user_b   0: user_a == user_b   1: user_a < user_b
+ * 
+ */
+static int _sort_user_dec(local_user_rec_t *user_a, local_user_rec_t *user_b)
+{
+	int diff = 0;
+
+	if (user_a->cpu_secs > user_b->cpu_secs)
+		return -1;
+	else if (user_a->cpu_secs < user_b->cpu_secs)
+		return 1;
+
+	if(!user_a->name || !user_b->name)
+		return 0;
+
+	diff = strcmp(user_a->name, user_b->name);
+
+	if (diff > 0)
+		return -1;
+	else if (diff < 0)
+		return 1;
+	
+	return 0;
+}
+
+
+static int _set_cond(int *start, int argc, char *argv[],
+		     acct_user_cond_t *user_cond, List format_list)
+{
+	int i;
+	int set = 0;
+	int end = 0;
+	int local_cluster_flag = all_clusters_flag;
+	acct_association_cond_t *assoc_cond = NULL;
+	
+	if(!user_cond) {
+		error("We need an acct_user_cond to call this");
+		return SLURM_ERROR;
+	}
+
+	if(!user_cond->user_list)
+		user_cond->user_list = list_create(slurm_destroy_char);
+
+	user_cond->with_deleted = 1;
+	user_cond->with_assocs = 1;
+	if(!user_cond->assoc_cond) {
+		user_cond->assoc_cond = 
+			xmalloc(sizeof(acct_association_cond_t));
+		user_cond->assoc_cond->with_usage = 1;
+	}
+	assoc_cond = user_cond->assoc_cond;
+	if(!assoc_cond->acct_list)
+		assoc_cond->acct_list = list_create(slurm_destroy_char);
+	if(!assoc_cond->cluster_list)
+		assoc_cond->cluster_list = list_create(slurm_destroy_char);
+
+	for (i=(*start); i<argc; i++) {
+		end = parse_option_end(argv[i]);
+		if (strncasecmp (argv[i], "Set", 3) == 0) {
+			i--;
+			break;
+		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
+			continue;
+		} else if(!end && !strncasecmp(argv[i], "all_clusters", 1)) {
+			local_cluster_flag = 1;
+			continue;
+		} else if(!end) {
+			addto_char_list(user_cond->user_list, argv[i]);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Accounts", 2) == 0) {
+			addto_char_list(assoc_cond->acct_list,
+					argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Clusters", 1) == 0) {
+			addto_char_list(assoc_cond->cluster_list,
+					argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "End", 1) == 0) {
+			assoc_cond->usage_end = parse_time(argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Format", 1) == 0) {
+			if(format_list)
+				addto_char_list(format_list, argv[i]+end);
+		} else if (strncasecmp (argv[i], "group", 1) == 0) {
+			group_accts = 1;
+		} else if (strncasecmp (argv[i], "Start", 1) == 0) {
+			assoc_cond->usage_start = parse_time(argv[i]+end);
+			set = 1;
+		} else if (strncasecmp (argv[i], "Users", 1) == 0) {
+			addto_char_list(user_cond->user_list,
+					argv[i]+end);
+			set = 1;
+		} else {
+			printf(" Unknown condition: %s\n"
+			       "Use keyword set to modify value\n", argv[i]);
+		}
+	}
+	(*start) = i;
+
+	if(!local_cluster_flag && !list_count(assoc_cond->cluster_list)) {
+		char *temp = slurm_get_cluster_name();
+		if(temp)
+			list_append(assoc_cond->cluster_list, temp);
+	}
+
+	set_start_end_time((time_t *)&assoc_cond->usage_start,
+			   (time_t *)&assoc_cond->usage_end);
+
+	return set;
+}
+
+static int _setup_print_fields_list(List format_list)
+{
+	ListIterator itr = NULL;
+	print_field_t *field = NULL;
+	char *object = NULL;
+
+	if(!format_list || !list_count(format_list)) {
+		printf(" error: we need a format list to set up the print.\n");
+		return SLURM_ERROR;
+	}
+
+	if(!print_fields_list)
+		print_fields_list = list_create(destroy_print_field);
+
+	itr = list_iterator_create(format_list);
+	while((object = list_next(itr))) {
+		field = xmalloc(sizeof(print_field_t));
+		if(!strncasecmp("Accounts", object, 1)) {
+			field->type = PRINT_USER_ACCT;
+			field->name = xstrdup("Account(s)");
+			field->len = 15;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Cluster", object, 1)) {
+			field->type = PRINT_USER_CLUSTER;
+			field->name = xstrdup("Cluster");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Login", object, 1)) {
+			field->type = PRINT_USER_LOGIN;
+			field->name = xstrdup("Login");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Proper", object, 1)) {
+			field->type = PRINT_USER_PROPER;
+			field->name = xstrdup("Proper Name");
+			field->len = 15;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Used", object, 1)) {
+			field->type = PRINT_USER_USED;
+			field->name = xstrdup("Used");
+			if(time_format == SREPORT_TIME_SECS_PER)
+				field->len = 18;
+			else
+				field->len = 10;
+			field->print_routine = sreport_print_time;
+		} else {
+			printf("Unknown field '%s'\n", object);
+			xfree(field);
+			continue;
+		}
+		list_append(print_fields_list, field);		
+	}
+	list_iterator_destroy(itr);
+
+	return SLURM_SUCCESS;
+}
+
+extern int user_top(int argc, char *argv[])
+{
+	int rc = SLURM_SUCCESS;
+	acct_user_cond_t *user_cond = xmalloc(sizeof(acct_user_cond_t));
+	ListIterator itr = NULL;
+	ListIterator itr2 = NULL;
+	ListIterator itr3 = NULL;
+	ListIterator cluster_itr = NULL;
+	List format_list = list_create(slurm_destroy_char);
+	List user_list = NULL;
+	List cluster_list = list_create(_destroy_local_cluster_rec);
+	char *object = NULL;
+
+	int i=0;
+	uint32_t total_time = 0;
+	acct_user_rec_t *user = NULL;
+	acct_association_rec_t *assoc = NULL;
+	acct_accounting_rec_t *assoc_acct = NULL;
+	local_user_rec_t *local_user = NULL;
+	local_cluster_rec_t *local_cluster = NULL;
+	print_field_t *field = NULL;
+
+	print_fields_list = list_create(destroy_print_field);
+
+	_set_cond(&i, argc, argv, user_cond, format_list);
+
+	if(!list_count(format_list)) 
+		addto_char_list(format_list, "Cl,L,P,A,U");
+
+	_setup_print_fields_list(format_list);
+	list_destroy(format_list);
+
+	user_list = acct_storage_g_get_users(db_conn, user_cond);
+	if(!user_list) {
+		printf(" Problem with user query.\n");
+		goto end_it;
+	}
+
+	if(print_fields_have_header) {
+		char start_char[20];
+		char end_char[20];
+		time_t my_end = user_cond->assoc_cond->usage_end-1;
+
+		slurm_make_time_str(
+			(time_t *)&user_cond->assoc_cond->usage_start, 
+			start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end,
+				    end_char, sizeof(end_char));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+		printf("Top %u Users %s - %s (%d secs)\n", 
+		       top_limit, start_char, end_char, 
+		       (user_cond->assoc_cond->usage_end 
+			- user_cond->assoc_cond->usage_start));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+	}
+	total_time = user_cond->assoc_cond->usage_end 
+		- user_cond->assoc_cond->usage_start;
+
+	itr = list_iterator_create(user_list);
+	cluster_itr = list_iterator_create(cluster_list);
+
+	while((user = list_next(itr))) {
+		if(!user->assoc_list || !list_count(user->assoc_list))
+			continue;
+		
+		itr2 = list_iterator_create(user->assoc_list);
+		while((assoc = list_next(itr2))) {
+			if(!assoc->accounting_list
+			   || !list_count(assoc->accounting_list))
+				continue;
+
+			while((local_cluster = list_next(cluster_itr))) {
+				if(!strcmp(local_cluster->name, 
+					   assoc->cluster)) {
+					ListIterator user_itr = 
+						list_iterator_create
+						(local_cluster->user_list); 
+					while((local_user 
+					       = list_next(user_itr))) {
+						if(!strcmp(local_user->name,
+							   assoc->user)) {
+							if(!group_accts &&
+							   !strcmp(local_user->
+								   name,
+								   assoc->
+								   user)) {
+								break;
+							} else if(group_accts)
+								break;
+						}
+					}
+					list_iterator_destroy(user_itr);
+					if(!local_user) {
+						local_user = xmalloc(
+							sizeof
+							(local_user_rec_t));
+						local_user->name =
+							xstrdup(assoc->user);
+						local_user->acct_list =
+							list_create
+							(slurm_destroy_char);
+						/* add the new user to this
+						 * cluster's list so it is
+						 * reported and freed */
+						list_append(
+							local_cluster->
+							user_list,
+							local_user);
+					}
+					break;
+				}
+			}
+			list_iterator_reset(cluster_itr);
+			if(!local_cluster) {
+				local_cluster = 
+					xmalloc(sizeof(local_cluster_rec_t));
+				list_append(cluster_list, local_cluster);
+
+				local_cluster->name = xstrdup(assoc->cluster);
+				local_cluster->user_list = 
+					list_create(_destroy_local_user_rec);
+				local_user = 
+					xmalloc(sizeof(local_user_rec_t));
+				local_user->name = xstrdup(assoc->user);
+				local_user->acct_list = 
+					list_create(slurm_destroy_char);
+				list_append(local_cluster->user_list, 
+					    local_user);
+			}
+			
+			itr3 = list_iterator_create(local_user->acct_list);
+			while((object = list_next(itr3))) {
+				if(!strcmp(object, assoc->acct))
+					break;
+			}
+			list_iterator_destroy(itr3);
+
+			if(!object)
+				list_append(local_user->acct_list, 
+					    xstrdup(assoc->acct));
+			itr3 = list_iterator_create(assoc->accounting_list);
+			while((assoc_acct = list_next(itr3))) {
+				local_user->cpu_secs += assoc_acct->alloc_secs;
+				local_cluster->cpu_secs += 
+					assoc_acct->alloc_secs;
+			}
+			list_iterator_destroy(itr3);
+		}
+		list_iterator_destroy(itr2);
+	}	
+	list_iterator_destroy(itr);
+
+	itr2 = list_iterator_create(print_fields_list);
+	print_fields_header(print_fields_list);
+
+	list_iterator_reset(cluster_itr);
+	while((local_cluster = list_next(cluster_itr))) {
+		/* count of users already printed for this cluster, used
+		 * to enforce top_limit below */
+		int count = 0;
+
+		list_sort(local_cluster->user_list, (ListCmpF)_sort_user_dec);
+
+		itr = list_iterator_create(local_cluster->user_list);
+		while((local_user = list_next(itr))) {
+			while((field = list_next(itr2))) {
+				char *tmp_char = NULL;
+				struct passwd *pwd = NULL;
+				switch(field->type) {
+				case PRINT_USER_ACCT:
+					itr3 = list_iterator_create(
+						local_user->acct_list);
+					while((object = list_next(itr3))) {
+						if(tmp_char)
+							xstrfmtcat(tmp_char,
+								   ", %s",
+								   object);
+						else
+							xstrcat(tmp_char,
+								object);
+					}
+					list_iterator_destroy(itr3);
+					field->print_routine(
+						SLURM_PRINT_VALUE,
+						field,
+						tmp_char);
+					xfree(tmp_char);
+					break;
+				case PRINT_USER_CLUSTER:
+					field->print_routine(
+						SLURM_PRINT_VALUE,
+						field,
+						local_cluster->name);
+					break;
+				case PRINT_USER_LOGIN:
+					field->print_routine(SLURM_PRINT_VALUE,
+							     field,
+							     local_user->name);
+					break;
+				case PRINT_USER_PROPER:
+					pwd = getpwnam(local_user->name);
+					if(pwd) {
+						tmp_char = strtok(pwd->pw_gecos,
+								  ",");
+						if(!tmp_char)
+							tmp_char =
+								pwd->pw_gecos;
+					}
+					field->print_routine(SLURM_PRINT_VALUE,
+							     field,
+							     tmp_char);
+					break;
+				case PRINT_USER_USED:
+					field->print_routine(
+						SLURM_PRINT_VALUE,
+						field,
+						local_user->cpu_secs,
+						local_cluster->cpu_secs);
+					break;
+				default:
+					break;
+				}
+			}
+			list_iterator_reset(itr2);
+			printf("\n");
+			count++;
+			if(count >= top_limit)
+				break;
+		}
+		list_iterator_destroy(itr);
+	}
+	list_iterator_destroy(cluster_itr);
+end_it:
+	/* group_accts is file scoped and may have been set by _set_cond,
+	 * so clear it here before the next report is generated.
+	 */
+	group_accts = 0;
+	destroy_acct_user_cond(user_cond);
+	
+	if(user_list) {
+		list_destroy(user_list);
+		user_list = NULL;
+	}
+	
+	if(cluster_list) {
+		list_destroy(cluster_list);
+		cluster_list = NULL;
+	}
+	
+	if(print_fields_list) {
+		list_destroy(print_fields_list);
+		print_fields_list = NULL;
+	}
+
+	return rc;
+}
+
diff --git a/src/sreport/user_reports.h b/src/sreport/user_reports.h
new file mode 100644
index 0000000000000000000000000000000000000000..534c4eca1c6e2d3a1fe6a5288d80b3aaa41ebb8b
--- /dev/null
+++ b/src/sreport/user_reports.h
@@ -0,0 +1,47 @@
+/*****************************************************************************\
+ *  user_reports.h - functions for generating user reports
+ *                     from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  LLNL-CODE-402394.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SREPORT_USER_REPORTS_H
+#define __SREPORT_USER_REPORTS_H
+
+#include "sreport.h"
+
+extern int user_top(int argc, char *argv[]);
+
+#endif
diff --git a/src/srun/allocate.c b/src/srun/allocate.c
index 0ae0183341103208bcef576ba444fa7495de11b7..ca6091a28cbbdedeaa696099f62e16249d3f7c62 100644
--- a/src/srun/allocate.c
+++ b/src/srun/allocate.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  * src/srun/allocate.c - srun functions for managing node allocations
- * $Id: allocate.c 13771 2008-04-02 20:03:47Z jette $
+ * $Id: allocate.c 14242 2008-06-11 23:29:49Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -503,6 +503,8 @@ create_job_step(srun_job_t *job)
 	xstrfmtcat(totalview_jobid, "%u", job->ctx_params.job_id);
 
 	job->ctx_params.node_count = job->nhosts;
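+	/* If the task count was not given explicitly but --ntasks-per-node
+	 * was, derive the task count from the allocated node count. */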
+	if (!opt.nprocs_set && (opt.ntasks_per_node != NO_VAL))
+		opt.nprocs = job->nhosts * opt.ntasks_per_node;
 	job->ctx_params.task_count = opt.nprocs;
 	
 	job->ctx_params.cpu_count = opt.overcommit ? job->ctx_params.node_count
@@ -544,10 +546,10 @@ create_job_step(srun_job_t *job)
 	job->ctx_params.network = opt.network;
 	job->ctx_params.name = opt.job_name;
 	
-	debug("requesting job %d, user %d, nodes %d including (%s)", 
+	debug("requesting job %u, user %u, nodes %u including (%s)", 
 	      job->ctx_params.job_id, job->ctx_params.uid,
 	      job->ctx_params.node_count, job->ctx_params.node_list);
-	debug("cpus %d, tasks %d, name %s, relative %d", 
+	debug("cpus %u, tasks %u, name %s, relative %u", 
 	      job->ctx_params.cpu_count, job->ctx_params.task_count,
 	      job->ctx_params.name, job->ctx_params.relative);
 
diff --git a/src/srun/srun.c b/src/srun/srun.c
index 3a67fc6f70011dd5af13e07b2df5acb9b62786ed..c994af6b4314f8bb083087f578bccee73feb740a 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -116,33 +116,35 @@ struct {
 /*
  * forward declaration of static funcs
  */
-static void  _print_job_information(resource_allocation_response_msg_t *resp);
-static void  _set_prio_process_env(void);
-static int   _set_rlimit_env(void);
-static int   _set_umask_env(void);
-static char *_uint16_array_to_str(int count, const uint16_t *array);
 static int   _become_user (void);
-static void  _run_srun_prolog (srun_job_t *job);
-static void  _run_srun_epilog (srun_job_t *job);
-static int   _run_srun_script (srun_job_t *job, char *script);
-static int   _slurm_debug_env_val (void);
 static int   _call_spank_local_user (srun_job_t *job);
-static void  _set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds);
 static void  _define_symbols(void);
+static void  _handle_intr();
+static void  _handle_signal(int signo);
+static void  _print_job_information(resource_allocation_response_msg_t *resp);
 static void  _pty_restore(void);
+static void  _run_srun_prolog (srun_job_t *job);
+static void  _run_srun_epilog (srun_job_t *job);
+static int   _run_srun_script (srun_job_t *job, char *script);
+static void  _set_cpu_env_var(resource_allocation_response_msg_t *resp);
+static int   _setup_signals();
 static void  _step_opt_exclusive(void);
-static void _task_start(launch_tasks_response_msg_t *msg);
-static void _task_finish(task_exit_msg_t *msg);
-static void _task_state_struct_init(int num_tasks);
-static void _task_state_struct_print(void);
-static void _task_state_struct_free(void);
-static void _handle_intr();
-static void _handle_signal(int signo);
-static int _setup_signals();
+static void  _set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds);
+static void  _set_prio_process_env(void);
+static int   _set_rlimit_env(void);
+static int   _set_umask_env(void);
+static int   _slurm_debug_env_val (void);
+static void  _task_start(launch_tasks_response_msg_t *msg);
+static void  _task_finish(task_exit_msg_t *msg);
+static void  _task_state_struct_init(int num_tasks);
+static void  _task_state_struct_print(void);
+static void  _task_state_struct_free(void);
+static char *_uint16_array_to_str(int count, const uint16_t *array);
 
 int srun(int ac, char **av)
 {
 	resource_allocation_response_msg_t *resp;
+	int debug_level;
 	env_t *env = xmalloc(sizeof(env_t));
 	uint32_t job_id = 0;
 	log_options_t logopt = LOG_OPTS_STDERR_ONLY;
@@ -158,7 +160,8 @@ int srun(int ac, char **av)
 	env->env = NULL;
 	env->ckpt_path = NULL;
 
-	logopt.stderr_level += _slurm_debug_env_val();
+	debug_level = _slurm_debug_env_val();
+	logopt.stderr_level += debug_level;
 	log_init(xbasename(av[0]), logopt, 0, NULL);
 
 /* 	xsignal(SIGQUIT, _ignore_signal); */
@@ -198,7 +201,8 @@ int srun(int ac, char **av)
 		logopt.stderr_level -= opt.quiet;
 		logopt.prefix_level = 1;
 		log_alter(logopt, 0, NULL);
-	}
+	} else
+		_verbose = debug_level;
 
 	(void) _set_rlimit_env();
 	_set_prio_process_env();
@@ -230,7 +234,7 @@ int srun(int ac, char **av)
                        opt.alloc_nodelist = xstrdup(resp->node_list);
 		if (opt.exclusive)
 			_step_opt_exclusive();
-
+		_set_cpu_env_var(resp);
 		job = job_step_create_allocation(resp);
 		slurm_free_resource_allocation_response_msg(resp);
 
@@ -251,6 +255,7 @@ int srun(int ac, char **av)
 		if ( !(resp = allocate_nodes()) ) 
 			exit(1);
 		_print_job_information(resp);
+		_set_cpu_env_var(resp);
 		job = job_create_allocation(resp);
 		opt.exclusive = false;	/* not applicable for this step */
 		if (!job || create_job_step(job) < 0) {
@@ -503,23 +508,23 @@ static void
 _print_job_information(resource_allocation_response_msg_t *resp)
 {
 	int i;
-	char tmp_str[10], job_details[4096];
+	char *str = NULL;
+	char *sep = "";
 
-	sprintf(job_details, "jobid %d: nodes(%d):`%s', cpu counts: ", 
-	        resp->job_id, resp->node_cnt, resp->node_list);
+	if (!_verbose)
+		return;
+
+	xstrfmtcat(str, "jobid %u: nodes(%u):`%s', cpu counts: ",
+		   resp->job_id, resp->node_cnt, resp->node_list);
 
 	for (i = 0; i < resp->num_cpu_groups; i++) {
-		sprintf(tmp_str, ",%u(x%u)", resp->cpus_per_node[i], 
-		        resp->cpu_count_reps[i]);
-		if (i == 0)
-			strcat(job_details, &tmp_str[1]);
-		else if ((strlen(tmp_str) + strlen(job_details)) < 
-		         sizeof(job_details))
-			strcat(job_details, tmp_str);
-		else
-			break;
+		xstrfmtcat(str, "%s%u(x%u)",
+			   sep, resp->cpus_per_node[i],
+			   resp->cpu_count_reps[i]);
+		sep = ",";
 	}
-	verbose("%s",job_details);
+	verbose("%s", str);
+	xfree(str);
 }
 
 /* Set SLURM_UMASK environment variable with current state */
@@ -572,6 +577,22 @@ static void  _set_prio_process_env(void)
 	debug ("propagating SLURM_PRIO_PROCESS=%d", retval);
 }
 
+static void _set_cpu_env_var(resource_allocation_response_msg_t *resp)
+{
+	char *tmp;
+
+	if (getenv("SLURM_JOB_CPUS_PER_NODE"))
+		return;
+
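+	/* Encode the per-node cpu counts in compressed form; for example a
+	 * value like "4(x2),2" would describe two 4-CPU nodes followed by
+	 * one 2-CPU node (the example value is illustrative only). */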
+	tmp = uint32_compressed_to_str((uint32_t)resp->num_cpu_groups,
+				       resp->cpus_per_node,
+				       resp->cpu_count_reps);
+	if (setenvf(NULL, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp) < 0)
+		error("unable to set SLURM_JOB_CPUS_PER_NODE in environment");
+	xfree(tmp);
+	return;
+} 
+
 /* Set SLURM_RLIMIT_* environment variables with current resource 
  * limit values, reset RLIMIT_NOFILE to maximum possible value */
 static int _set_rlimit_env(void)
diff --git a/src/sstat/print.c b/src/sstat/print.c
index 0ff252c9d25bce0afd3a79b21507d9d0e0fb5119..8057f4a12a2ce2a82c47463325b7cfc08be74561 100644
--- a/src/sstat/print.c
+++ b/src/sstat/print.c
@@ -298,13 +298,13 @@ extern void print_rss(type_t type, void *object)
 		nodes = job->nodes;
 		pos = sacct.min_cpu_id.nodeid;				 
 		convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1),
-				 UNIT_NONE);
+				 UNIT_KILO);
 
 		if(job->track_steps)
 			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
 		else {
 			convert_num_unit((float)sacct.ave_rss, 
-					 buf2, sizeof(buf2), UNIT_NONE);
+					 buf2, sizeof(buf2), UNIT_KILO);
 			find_hostname(pos, nodes, buf3);
 			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
 				 buf1,
@@ -319,9 +319,9 @@ extern void print_rss(type_t type, void *object)
 		nodes = step->nodes;
 		pos = sacct.min_cpu_id.nodeid;				 
 		convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1),
-				 UNIT_NONE);
+				 UNIT_KILO);
 		convert_num_unit((float)sacct.ave_rss, buf2, sizeof(buf2),
-				 UNIT_NONE);
+				 UNIT_KILO);
 		find_hostname(pos, nodes, buf3);
 		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
 			 buf1,
@@ -400,12 +400,12 @@ extern void print_vsize(type_t type, void *object)
 		nodes = job->nodes;
 		pos = sacct.min_cpu_id.nodeid;				 
 		convert_num_unit((float)sacct.max_vsize, 
-				 buf1, sizeof(buf1),UNIT_NONE);
+				 buf1, sizeof(buf1),UNIT_KILO);
 		if(job->track_steps)
 			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
 		else {
 			convert_num_unit((float)sacct.ave_vsize,
-					 buf2, sizeof(buf2), UNIT_NONE);
+					 buf2, sizeof(buf2), UNIT_KILO);
 			find_hostname(pos, nodes, buf3);
 			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
 				 buf1,
@@ -420,9 +420,9 @@ extern void print_vsize(type_t type, void *object)
 		nodes = step->nodes;
 		pos = sacct.min_cpu_id.nodeid;				 
 		convert_num_unit((float)sacct.max_vsize, buf1, sizeof(buf1), 
-				 UNIT_NONE);
+				 UNIT_KILO);
 		convert_num_unit((float)sacct.ave_vsize, buf2, sizeof(buf2),
-				 UNIT_NONE);
+				 UNIT_KILO);
 		find_hostname(pos, nodes, buf3);
 		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
 			 buf1,
diff --git a/src/sstat/sstat.c b/src/sstat/sstat.c
index 58f75d55df94ff5c51fdbf63c9ee3d86e662cdf4..62f246aebd05fe318efdb5e0d991b25e4ecf41ae 100644
--- a/src/sstat/sstat.c
+++ b/src/sstat/sstat.c
@@ -170,11 +170,6 @@ int _sstat_query(slurm_step_layout_t *step_layout, uint32_t job_id,
 cleanup:
 	
 	if(tot_tasks) {
-		step.sacct.ave_rss *= 1024;
-		step.sacct.max_rss *= 1024;
-		step.sacct.ave_vsize *= 1024;
-		step.sacct.max_vsize *= 1024;
-
 		step.sacct.ave_cpu /= tot_tasks;
 		step.sacct.ave_cpu /= 100;
 		step.sacct.min_cpu /= 100;
diff --git a/src/sview/popups.c b/src/sview/popups.c
index f48612022787a4381144f1ffc9f96e99258c1c26..b23f5319264a416bb517b8a3c2f1aefe402ed99b 100644
--- a/src/sview/popups.c
+++ b/src/sview/popups.c
@@ -1,7 +1,9 @@
 /****************************************************************************\
  *  popups.c - put different popup displays here
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2008 Vijay Ramasubramanian
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>, et. al.
  *  LLNL-CODE-402394.
@@ -630,6 +632,9 @@ extern void create_daemon_popup(GtkAction *action, gpointer user_data)
 	if ((n = slurm_conf_get_nodename(me))) {
 		d = 1;
 		xfree(n);
+	} else if ((n = slurm_conf_get_aliased_nodename())) {
+		d = 1;
+		xfree(n);
 	} else if ((n = slurm_conf_get_nodename("localhost"))) {
 		d = 1;
 		xfree(n);
diff --git a/src/sview/sview.c b/src/sview/sview.c
index f277da115700c57e8f1ffd4c9efedbb67afd5e1b..74979e6ba514ce1f5445df113fc7d7f15a4a9719 100644
--- a/src/sview/sview.c
+++ b/src/sview/sview.c
@@ -1,7 +1,8 @@
 /****************************************************************************\
  *  sview.c - main for sview
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>, et. al.
  *  LLNL-CODE-402394.
diff --git a/testsuite/expect/globals b/testsuite/expect/globals
index 8fc3abccb3c34773bbce91bb21d7c8de531158df..cdb60c4ae9a40858d60a09eef5e07c670729f813 100755
--- a/testsuite/expect/globals
+++ b/testsuite/expect/globals
@@ -36,7 +36,7 @@
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 ############################################################################
 
-global sacctmgr sacct salloc sattach sbatch sbcast scancel scontrol sinfo smap squeue srun
+global sacctmgr sacct salloc sattach sbatch sbcast scancel scontrol sinfo smap squeue sreport srun sstat strigger
 
 # Conditional set.  Only set variable if variable does not yet exist.
 proc cset {name value} {
@@ -68,6 +68,8 @@ cset sinfo       "${slurm_dir}/bin/sinfo"
 cset smap        "${slurm_dir}/bin/smap"
 cset squeue      "${slurm_dir}/bin/squeue"
 cset srun        "${slurm_dir}/bin/srun"
+cset sreport     "${slurm_dir}/bin/sreport"
+cset sstat       "${slurm_dir}/bin/sstat"
 cset strigger    "${slurm_dir}/bin/strigger"
 
 cset pbsnodes    "${slurm_dir}/bin/pbsnodes"
@@ -504,6 +506,81 @@ proc wait_for_step { step_id } {
 }
 
 
+################################################################
+#
+# Proc: test_assoc_enforced
+#
+# Purpose: Determine if we need an association to run a job.
+# This is based upon the value of AccountingStorageEnforce
+# in the slurm.conf.
+#
+# Returns 1 if the system is enforcing associations, 0 otherwise
+#
+################################################################
+
+proc test_assoc_enforced { } {
+	global scontrol
+
+	log_user 0
+	set assoc_enforced 0
+	spawn $scontrol show config
+	expect {
+		"AccountingStorageEnforce = 1" {
+			set assoc_enforced 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	
+	return $assoc_enforced
+}
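+#
+# A minimal usage sketch (the guards added to the tests below follow this
+# pattern; the warning text is illustrative):
+#
+#	if {[test_assoc_enforced]} {
+#		send_user "\nWARNING: This test will not work when associations are enforced.\n"
+#		exit $exit_code
+#	}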
+
+################################################################
+#
+# Proc: get_default_acct
+#
+# Purpose: get users default account.
+#
+# Returns name of default account if exists, NULL otherwise
+#
+################################################################
+
+proc get_default_acct { user } {
+	global sacctmgr alpha_numeric_under bin_id
+
+	log_user 0
+	set def_acct ""
+
+	if { !$user } {
+		spawn $bin_id -un
+		expect {
+			-re "($alpha_numeric_under)" {
+				set user $expect_out(1,string)
+			}
+			eof {
+				wait
+			}
+		}
+	}
+
+	spawn $sacctmgr -n list user $user format="d"
+	expect {
+		-re "($alpha_numeric_under)" {
+			set def_acct $expect_out(1,string)
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	
+	return $def_acct
+}
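+#
+# A minimal usage sketch (mirrors how the tests below fall back to the
+# default account when associations are enforced):
+#
+#	if {[test_assoc_enforced]} {
+#		set job_acct [get_default_acct 0]
+#	}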
+
 ################################################################
 #
 # Proc: test_front_end
diff --git a/testsuite/expect/test1.42 b/testsuite/expect/test1.42
index 430d7d5af42a5286a76faba1affdcc192447a64f..644dfafe45c78728011874c658c484d535d57559 100755
--- a/testsuite/expect/test1.42
+++ b/testsuite/expect/test1.42
@@ -40,6 +40,11 @@ set job_id2     0
 
 print_header $test_id
 
+if {[test_assoc_enforced]} {
+	send_user "\nWARNING: This test will not work when associations are enforced.\n"
+	exit $exit_code
+}
+
 global env
 set env(SLURM_ACCOUNT) QA_ACCT
 
diff --git a/testsuite/expect/test1.80 b/testsuite/expect/test1.80
index 404781af5525d0f03c7cc60a7a76197e7ca6d971..a3b3e11c625dcac3fa99a85de956ff9337200efa 100755
--- a/testsuite/expect/test1.80
+++ b/testsuite/expect/test1.80
@@ -9,7 +9,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -51,7 +52,11 @@ set expected_layout [list 0 0 1 1]
 set tested_layout [list -1 -1 -1 -1] 
 set srun_pid [spawn $srun -O -N2 -n4 -l --distribution=block -t1 $bin_printenv SLURM_NODEID]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "More processors requested than permitted" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exit 0
+	}
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exit 0
 	}
@@ -94,7 +99,11 @@ set expected_layout [list 0 1 0 1]
 set tested_layout [list -1 -1 -1 -1] 
 set srun_pid [spawn $srun -O -N2 -n4 -l --distribution=cyclic -t1 $bin_printenv SLURM_NODEID]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "More processors requested than permitted" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exit 0
+	}
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exit 0
 	}
diff --git a/testsuite/expect/test1.81 b/testsuite/expect/test1.81
index bd48d432e294cf143cbeb4c08ccd7252c56fe4dc..662bbee15e0b387b1a98144383c3f3b5ff6aff92 100755
--- a/testsuite/expect/test1.81
+++ b/testsuite/expect/test1.81
@@ -9,7 +9,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -225,7 +226,7 @@ set host_3      ""
 set timeout $max_job_delay
 set srun_pid [spawn $srun -N2-3 -l -t1 $bin_printenv SLURM_NODEID]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exit $exit_code
 	}
diff --git a/testsuite/expect/test1.82 b/testsuite/expect/test1.82
index e91a3f388867b6b43aab0c81dd86e2efa0145b60..6c56e40c5fb0c8d441a42e50745115d70d67cda6 100755
--- a/testsuite/expect/test1.82
+++ b/testsuite/expect/test1.82
@@ -9,7 +9,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -66,7 +67,7 @@ if {$available < $node_cnt} {
 
 set srun_pid [spawn $srun -N$node_cnt -l -t1 $bin_printenv SLURMD_NODENAME]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exit $exit_code
 	}
diff --git a/testsuite/expect/test1.83 b/testsuite/expect/test1.83
index a5ec1a9b41a692e6d303942b97f0cf6b17fe02f4..4e20b25096f814582122f4703b77fcd2ed7f2e86 100755
--- a/testsuite/expect/test1.83
+++ b/testsuite/expect/test1.83
@@ -13,7 +13,8 @@
 #          the value of <number> indicates the nodes relative location. 
 #          Change tha node name parsing logic as needed for other formats.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -66,7 +67,7 @@ set host_2_num       0
 set timeout $max_job_delay
 set srun_pid [spawn $srun -N3-3 --contiguous -l -t1 $bin_printenv SLURMD_NODENAME]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exit $exit_code
 	}
diff --git a/testsuite/expect/test1.86 b/testsuite/expect/test1.86
index 380e80997e4846e18d8e3af5fcbaf2faba812aab..fd6dc70432c59a555aad27895d17c5fead4ab27d 100755
--- a/testsuite/expect/test1.86
+++ b/testsuite/expect/test1.86
@@ -9,7 +9,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -64,7 +65,7 @@ make_bash_script $file_in "
 set timeout $max_job_delay
 set salloc_pid [spawn $salloc -N2 -t1 ./$file_in]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exec $bin_rm -f $file_in
 		exit $exit_code
diff --git a/testsuite/expect/test1.87 b/testsuite/expect/test1.87
index 9442883ed3bfaa1effc051dad1dd7eb042a79251..53d5fc2063f120c9eedb06bb10206847c8cc2046 100755
--- a/testsuite/expect/test1.87
+++ b/testsuite/expect/test1.87
@@ -9,7 +9,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -59,7 +60,12 @@ make_bash_script $file_in "
 set timeout $max_job_delay
 set srun_pid [spawn $salloc -N4 ./$file_in]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "More processors requested than permitted" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exec $bin_rm -f $file_in
+		exit $exit_code
+	}
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exec $bin_rm -f $file_in
 		exit $exit_code
diff --git a/testsuite/expect/test1.91 b/testsuite/expect/test1.91
index 71c5a832a6d7e3667064001fdc9ef65968628974..f0a2669ffebfeea0753576330bdec4f3f542b8bd 100755
--- a/testsuite/expect/test1.91
+++ b/testsuite/expect/test1.91
@@ -63,19 +63,28 @@ send_user "\ntask affinity plugin installed\n"
 set num_sockets 0
 set num_cores   0
 set num_threads 0
+set node_name ""
 log_user 0
+
+# Record the name of the last node reported since, on heterogeneous
+# systems, the socket/core/thread counts can differ from node to node.
+
 spawn $scontrol show node
 expect {
+	-re "NodeName=($alpha_numeric)" {
+		set node_name $expect_out(1,string)
+		exp_continue
+	}
 	-re "Sockets=($number)" {
 		set num_sockets $expect_out(1,string)
 		exp_continue
 	}
 	-re "Cores=($number)" {
-		set num_cores $expect_out(1,string)
+	   	set num_cores $expect_out(1,string)
 		exp_continue
 	}
 	-re "Threads=($number)" {
-		set num_threads $expect_out(1,string)
+	   	set num_threads $expect_out(1,string)
 		exp_continue
 	}
 	eof {
@@ -101,7 +110,7 @@ exec $bin_chmod 700 $file_prog
 #
 global env
 set env(SLURM_CPU_BIND) "verbose"
-set salloc_pid [spawn $salloc -N1 --exclusive --verbose -t2 $bin_bash]
+set salloc_pid [spawn $salloc -w $node_name -N1 --exclusive --verbose -t2 $bin_bash]
 
 #############################################################################
 #
diff --git a/testsuite/expect/test1.92 b/testsuite/expect/test1.92
index a4f1719a9d5fee4152be202f3199ffc4c2214b87..0938a54f0cebf307998705fa96e6342afac4f590 100755
--- a/testsuite/expect/test1.92
+++ b/testsuite/expect/test1.92
@@ -8,7 +8,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2005 The Regents of the University of California.
+# Copyright (C) 2005-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -53,7 +54,11 @@ exit 0
 #
 set salloc_pid [spawn $salloc -N2 -n4 --verbose -t2 $bin_bash]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "More processors requested than permitted" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exit 0
+	}
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exit 0
 	}
diff --git a/testsuite/expect/test1.93 b/testsuite/expect/test1.93
index 6ff305d4cde70002947dd8e6ee68e262bb146e6e..da580328643bd5989fdb3d7b4cefd1e84791663b 100755
--- a/testsuite/expect/test1.93
+++ b/testsuite/expect/test1.93
@@ -58,7 +58,7 @@ make_bash_script $file_in "
 set timeout $max_job_delay
 set salloc_pid [spawn $salloc -t1 -n2 ./$file_in]
 expect {
-	-re "More ($alpha) requested than permitted" {
+	-re "More processors requested than permitted" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exec $bin_rm -f $file_in
 		exit $exit_code
diff --git a/testsuite/expect/test12.2 b/testsuite/expect/test12.2
index 30f006010f9734d8e9724153e1286491cb32aaf4..e4d9a66e344a89f804bc14aeea6f0a4c29fe69b0 100755
--- a/testsuite/expect/test12.2
+++ b/testsuite/expect/test12.2
@@ -143,7 +143,7 @@ exec $bin_sleep 5
 set mem_used -1
 set mem_task -1
 set ave_used -1
-spawn $sacct --noheader --stat --job=$job_id.0 --fields vsize
+spawn $sstat --noheader --job=$job_id.0 --fields vsize
 expect {
 	-re "($float)(\[KM\]*)/.*($number) - ($float)(\[KM\]*)" {
 		set mem_used  $expect_out(1,string)
@@ -168,7 +168,7 @@ expect {
 		exp_continue
 	}
 	timeout {
-		send_user "\nFAILURE: sacct not responding\n"
+		send_user "\nFAILURE: sstat not responding\n"
 		set exit_code 1
 	}
 	eof {
@@ -177,24 +177,24 @@ expect {
 }
 
 if { $mem_used == -1 } {
-	send_user "\nFAILURE: sacct stat memory not found\n"
+	send_user "\nFAILURE: sstat stat memory not found\n"
 	set exit_code 1
 } elseif { $mem_task != 0 } {
-	send_user "\nFAILURE: sacct stat memory task not found\n"
+	send_user "\nFAILURE: sstat stat memory task not found\n"
 	set exit_code 1	
 } elseif { $ave_used != $mem_used } {
-	send_user "\nFAILURE: sacct stat memory task not equal to ave memory\n"
+	send_user "\nFAILURE: sstat stat memory task not equal to ave memory\n"
 	set exit_code 1	
 }
 # Compute error in KB
 set diff_mem [expr $mem_used - $mem_size]
 set error_mem [expr abs($diff_mem)]
 if {$error_mem > 4000} {
-	send_user "\nFAILURE: sacct memory use discrepancy of $error_mem KB\n"
+	send_user "\nFAILURE: sstat memory use discrepancy of $error_mem KB\n"
 	send_user "  Wanted $mem_size KB, got $mem_used KB\n"
 	set exit_code 1
 } else {
-	send_user "\nSUCCESS: sacct memory use discrepancy of $error_mem KB\n"
+	send_user "\nSUCCESS: sstat memory use discrepancy of $error_mem KB\n"
 }
 
 #
@@ -236,7 +236,7 @@ expect {
 		wait
 	}
 }
-if {$matches != 4} {
+if {$matches < 4} {
 	send_user "\nFAILURE: sacct reporting failed $matches\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test15.14 b/testsuite/expect/test15.14
index b59f3867a7dfaaee2f30b0b1bcf55b1aefd73c7b..85c7d17bf48e903f804278a7c2fcfb972131aaa7 100755
--- a/testsuite/expect/test15.14
+++ b/testsuite/expect/test15.14
@@ -37,9 +37,15 @@ set file_in     "test$test_id.input"
 set exit_code   0
 set job_id1     0
 set job_id2     0
+set job_acct    "TEST_ACCT"
 
 print_header $test_id
 
+if {[test_assoc_enforced]} {
+	send_user "\nWARNING: This test will not work when associations are enforced.\n"
+	exit $exit_code
+}
+
 global env
 set env(SALLOC_ACCOUNT) QA_ACCT
 
@@ -52,7 +58,7 @@ make_bash_script $file_in "$bin_sleep 10"
 # Spawn a srun batch job that just sleeps for a while
 #
 set timeout $max_job_delay
-spawn $sbatch --output=/dev/null --error=/dev/null --account=MY_ACCT -t1 $file_in
+spawn $sbatch --output=/dev/null --error=/dev/null --account=$job_acct -t1 $file_in
 expect {
 	 -re "Submitted batch job ($number)" {
 		set job_id1 $expect_out(1,string)
@@ -86,7 +92,7 @@ expect {
 		set match_state 1
 		exp_continue
 	}
-	-re "Account=MY_ACCT" {
+	-re "Account=$job_acct" {
 		set match_acct 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test15.19 b/testsuite/expect/test15.19
index 54c93c518538b1563ac35216bb8535768b7bfbe6..8b673cb61c9ae8e2635fda65f4662c21bdb5318b 100755
--- a/testsuite/expect/test15.19
+++ b/testsuite/expect/test15.19
@@ -9,7 +9,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -260,7 +261,11 @@ expect {
 		set job_id $expect_out(1,string)
 		exp_continue
 	}
-	-re "More ($alpha) requested than permitted" {
+	-re "More processors requested than permitted" {
+		send_user "\nWARNING: can't test salloc task distribution\n"
+		exit $exit_code
+	}
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test salloc task distribution\n"
 		exit $exit_code
 	}
diff --git a/testsuite/expect/test15.20 b/testsuite/expect/test15.20
index 17138750945addcb9caf28c6ab348a83e308e2c8..aa28b812480da765765515514e645cdc971edfa9 100755
--- a/testsuite/expect/test15.20
+++ b/testsuite/expect/test15.20
@@ -9,7 +9,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
+# Copyright (C) 2002-2008 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -72,7 +73,11 @@ expect {
 		send "$bin_echo MY_ID=\$SLURM_JOBID \n"
 		exp_continue
 	}
-	-re "More ($alpha) requested than permitted" {
+	-re "More processors requested than permitted" {
+		send_user "\nWARNING: can't test salloc task distribution\n"
+		exit $exit_code
+	}
+	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't test salloc task distribution\n"
 		exit $exit_code
 	}
diff --git a/testsuite/expect/test15.23 b/testsuite/expect/test15.23
index 3ef5b70a66a02dbb0486664e6c78047054e1af9a..e1b6d7e79e5a0804ba51d0a4adc5232399e5b2f9 100755
--- a/testsuite/expect/test15.23
+++ b/testsuite/expect/test15.23
@@ -36,14 +36,20 @@ set test_id          "15.23"
 set exit_code        0
 set job_id           0
 set timeout          $max_job_delay
+set job_acct    "TEST_ACCT"
 
 print_header $test_id
 
+if {[test_assoc_enforced]} {
+	send_user "\nWARNING: Using the user's default account instead of $job_acct since associations are enforced.\n"
+	set job_acct [get_default_acct 0]
+}
+
 #
 # Set target environment variables
 #
 global env
-set env(SALLOC_ACCOUNT)       tst.acct
+set env(SALLOC_ACCOUNT)       $job_acct
 set env(SALLOC_DEBUG)         2
 set env(SALLOC_TIMELIMIT)     2
 
@@ -72,7 +78,7 @@ expect {
 		}
 		exp_continue
 	}
-	-re "Account=tst.acct" {
+	-re "Account=$job_acct" {
 		incr matches
 		send "exit\n"
 		exp_continue
diff --git a/testsuite/expect/test17.18 b/testsuite/expect/test17.18
index f6c03097b90398abc3858c0f924a6cc9964d0299..0a60689411e6f6ed96dfd05966e2565b8b75b98e 100755
--- a/testsuite/expect/test17.18
+++ b/testsuite/expect/test17.18
@@ -38,9 +38,15 @@ set file_out    "test$test_id.output"
 set exit_code   0
 set job_id1     0
 set job_id2     0
+set job_acct    "QA_ACCT"
 
 print_header $test_id
 
+if {[test_assoc_enforced]} {
+	send_user "\nWARNING: This test will not work when associations are enforced.\n"
+	exit $exit_code
+}
+
 global env
 set env(SBATCH_ACCOUNT) QA_ACCT
 
@@ -198,11 +204,11 @@ expect {
 		incr match
 		exp_continue
 	}
-	-re "StartTime=($number)/($number)-12:00:00" {
+	-re "EligibleTime=($number)/($number)-12:00:00" {
 		incr match
 		exp_continue
 	}
-	-re "StartTime=($number)-($number)-($number)T12:00:00" {
+	-re "EligibleTime=($number)-($number)-($number)T12:00:00" {
 		incr match
 		exp_continue
 	}
@@ -216,7 +222,7 @@ expect {
 	}
 }
 if {$match != 2} {
-	send_user "\nFAILURE: unexpected JobState or StartTime\n"
+	send_user "\nFAILURE: unexpected JobState or EligibleTime\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test17.21 b/testsuite/expect/test17.21
index 099e10fa24dc8328b27851d46299558be35a25c3..a2e2cac84a74fb5bbd193dc33c60d33ba24cb83e 100755
--- a/testsuite/expect/test17.21
+++ b/testsuite/expect/test17.21
@@ -7,7 +7,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2005-2006 The Regents of the University of California.
+# Copyright (C) 2005-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # LLNL-CODE-402394.
@@ -41,6 +42,11 @@ set delay       10
 
 print_header $test_id
 
+if {[test_assoc_enforced]} {
+	send_user "\nWARNING: Using the user's default account instead of $job_acct since associations are enforced.\n"
+	set job_acct [get_default_acct 0]
+}
+
 make_bash_script $file_in "
 #SBATCH --job-name=$job_name
 #SBATCH --account=$job_acct
@@ -99,7 +105,7 @@ $bin_sleep $delay
 
 set sbatch_pid [spawn $sbatch -o $file_out $file_in]
 expect {
-	-re "More .* requested than permitted" {	
+	-re "Node count specification invalid" {
 		send_user "This error was expected, no worries\n\n"
 		exp_continue
 	}	
@@ -128,7 +134,7 @@ $bin_sleep $delay
 
 set sbatch_pid [spawn $sbatch -N1 -o $file_out $file_in]
 expect {
-	-re "More nodes requested than permitted" {
+	-re "Node count specification invalid" {
 		send_user "\nFAILURE: sbatch read from the batch file options"
 		send_user "over writing the commandline options\n"
 		set exit_code 1
diff --git a/testsuite/expect/test17.25 b/testsuite/expect/test17.25
index f09f79ca74044a2a69a01ae5f1b9a3677c8cbb84..4de716e8d909d31148ad64ab2e4cf70311f6107d 100755
--- a/testsuite/expect/test17.25
+++ b/testsuite/expect/test17.25
@@ -37,14 +37,20 @@ set exit_code        0
 set file_in          "test$test_id.input"
 set job_id           0
 set timeout          $max_job_delay
+set account	     "dummy_acct"
 
 print_header $test_id
 
+if {[test_assoc_enforced]} {
+	send_user "\nWARNING: Using the user's default account instead of $account since associations are enforced.\n"
+	set account [get_default_acct 0]
+}
+
 #
 # Set target environment variables
 #
 global env
-set env(SBATCH_ACCOUNT)      "dummy_acct"
+set env(SBATCH_ACCOUNT)      $account
 set env(SBATCH_DEBUG)        4
 set env(SBATCH_TIMELIMIT)    3
 
@@ -69,7 +75,7 @@ expect {
 		incr matches
 		exp_continue
 	}
-	-re "account *: dummy_acct" {
+	-re "account *: $account" {
 		incr matches
 		exp_continue
 	}
diff --git a/testsuite/expect/test17.28 b/testsuite/expect/test17.28
index ca9c67733c1c49f41402826ba43c4b9509ceed50..47fbd24350165c4683c2b6beadb164803faee1b1 100755
--- a/testsuite/expect/test17.28
+++ b/testsuite/expect/test17.28
@@ -7,7 +7,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2005-2006 The Regents of the University of California.
+# Copyright (C) 2005-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # LLNL-CODE-402394.
@@ -35,7 +36,6 @@ set test_id     "17.28"
 set exit_code   0
 set file_in     "test$test_id.input"
 set file_out    "test$test_id.output"
-set job_acct    "TEST_ACCT"
 set job_name    "TEST_NAME"
 set delay       1
 
@@ -43,7 +43,6 @@ print_header $test_id
 
 make_bash_script $file_in "
 #SBATCH --job-name=$job_name
-#SBATCH --account=$job_acct
 $bin_sleep $delay
 "
 
@@ -74,10 +73,6 @@ expect {
 		incr matches
 		exp_continue
 	}
-	-re "Account=$job_acct" {
-		incr matches
-		exp_continue
-	}
 	timeout {
 		send_user "\nFAILURE: scontrol not responding\n"
 		set exit_code 1
@@ -87,8 +82,8 @@ expect {
 		wait
 	}
 }
-if {$matches != 2} {
-	send_user "\nFAILURE: did not set job name and account from batch script\n"
+if {$matches != 1} {
+	send_user "\nFAILURE: did not set job name from batch script\n"
 	set exit_code 1
 }
 cancel_job $job_id
@@ -107,7 +102,12 @@ set job_id  0
 set matches 0
 spawn $sbatch -o $file_out $file_in
 expect {
-	-re "More .* requested than permitted" {	
+	-re "More processors requested than permitted" {
+		send_user "This error was expected, no worries\n\n"
+		incr matches
+		exp_continue
+	}
+	-re "Node count specification invalid" {
 		send_user "This error was expected, no worries\n\n"
 		incr matches
 		exp_continue
@@ -143,7 +143,7 @@ $bin_sleep $delay
 set job_id  0
 spawn $sbatch -N1 -o $file_out $file_in
 expect {
-	-re "More nodes requested than permitted" {
+	-re "Node count specification invalid" {
 		send_user "\nFAILURE: sbatch read from the batch file options"
 		send_user "over writing the commandline options\n"
 		set exit_code 1
diff --git a/testsuite/expect/test17.5 b/testsuite/expect/test17.5
index e8a72451a45515991113789eab6bf0c9d5449162..63f72430f1acd753feca20e4119c921650c263d1 100755
--- a/testsuite/expect/test17.5
+++ b/testsuite/expect/test17.5
@@ -8,7 +8,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2006 The Regents of the University of California.
+# Copyright (C) 2006-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -66,7 +67,7 @@ make_bash_script $file_script "
 # and confirm their contents
 #
 set job_id 0
-spawn $file_script
+spawn ./$file_script
 expect {
 	-re "Submitted batch job ($number)" {
 		set job_id $expect_out(1,string)
diff --git a/testsuite/expect/test17.8 b/testsuite/expect/test17.8
index d17db4023e63e78c9e4f76fd96cc0a0094d916cf..e181934e69ad382b95bd33cca3af67122a361067 100755
--- a/testsuite/expect/test17.8
+++ b/testsuite/expect/test17.8
@@ -105,6 +105,10 @@ expect {
 		set job_id $expect_out(1,string)
 		exp_continue
 	}
+	-re "time limit exceeds" {
+		send_user "\nWARNING: unable to fully test time limits\n"
+		exit $exit_code
+	}
 	timeout {
 		send_user "\nFAILURE: sbatch not responding\n"
 		set exit_code 1
diff --git a/testsuite/expect/test3.4 b/testsuite/expect/test3.4
index dda1237d00634930374a9d322e66cbfe29ebe603..cac8b13572e588172a04e235d2188e496315794c 100755
--- a/testsuite/expect/test3.4
+++ b/testsuite/expect/test3.4
@@ -7,7 +7,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -90,22 +91,10 @@ expect {
 #
 spawn $scontrol update JobId=$job_id Priority=$new_prio 
 expect {
-	-re "slurm_update error: ($alpha_numeric) ($alpha_numeric)" {
-		set access_err 0
-		set err_msg1 $expect_out(1,string)
-		set err_msg2 $expect_out(2,string)
-		if {[string compare $err_msg1 "Access"] == 0} {
-			set access_err 1
-		}
-		if {[string compare $err_msg2 "denied"] == 0} {
-			set access_err 1
-		}
-		if {$access_err == 1} {
-			send_user "\nWARNING: User not authorized to modify jobs\n"
-			exit $exit_code
-		} else {
-			set authorized 0
-		}
+	-re "slurm_update error: Access.*denied" {
+		send_user "\nWARNING: User not authorized to modify jobs\n"
+		cancel_job $job_id
+		exit $exit_code
 		exp_continue
 	}
 	timeout {
diff --git a/testsuite/expect/test3.7 b/testsuite/expect/test3.7
index 9d8b4f8841c8db67cb34f1725cd64e81493de785..60f8808e4ba4e4776bd21bbcd19782b76a133bf0 100755
--- a/testsuite/expect/test3.7
+++ b/testsuite/expect/test3.7
@@ -8,7 +8,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2005-2006 The Regents of the University of California.
+# Copyright (C) 2005-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -208,6 +209,7 @@ if {$not_supported == 1} {
 if {$not_supported == 0} {
 	if {[wait_for_job $job_id2 RUNNING] != 0} {
 		send_user "\nFAILURE: waiting for job $job_id2 to run\n"
+		cancel_job $job_id1
 		cancel_job $job_id2
 		exit 1
 	}
diff --git a/testsuite/expect/test7.10 b/testsuite/expect/test7.10
index bcfb40eb2e791dd94d33994a2bb5a04eb389ba87..322d58a98ec686760d7920446c993b3679c06be6 100755
--- a/testsuite/expect/test7.10
+++ b/testsuite/expect/test7.10
@@ -70,6 +70,7 @@ set srun_pid [spawn $srun -N1 -t1 $bin_id]
 expect {
 	-re "(uid=.*\n)" {
 		send_user "\nFAILURE: srun ran with bogus getuid functions\n"
+		send_user "  Make sure you are not running with auth/none to ensure security\n"
 		set exit_code 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test8.6 b/testsuite/expect/test8.6
index 91c0ef46e43f4a8d2687ecc9b21677fa8d88c738..9b78b162fb21e38d1769ecb2da27db6e6c9851ac 100755
--- a/testsuite/expect/test8.6
+++ b/testsuite/expect/test8.6
@@ -7,7 +7,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2006 The Regents of the University of California.
+# Copyright (C) 2006-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
 # LLNL-CODE-402394.
@@ -65,6 +66,10 @@ proc run_batch_jobs { node_cnt job_cnt file_in } {
 	for {set inx 0} {$inx < $job_cnt} {incr inx} {
 		set sbatch_pid [spawn $sbatch --output=/dev/null -t5 -N$node_cnt-$node_cnt $file_in]
 		expect {
+			-re "Node count specification invalid" {
+				send_user "This error was expected, no worries\n"
+				return -1
+			}
 			-re "More processors requested than permitted" {
 				send_user "This error was expected, no worries\n"
 				return -1