From 8f1661ad7170b909b54e735827b17ebfa3142fee Mon Sep 17 00:00:00 2001
From: Mehdi Dogguy <mehdi@debian.org>
Date: Mon, 8 Sep 2014 21:31:51 +0200
Subject: [PATCH] Imported Upstream version 1.3.9

---
 META                                          |    4 +-
 NEWS                                          |   84 +-
 auxdir/x_ac_databases.m4                      |    1 +
 auxdir/x_ac_debug.m4                          |   23 +-
 config.h.in                                   |    3 +
 configure                                     |   82 +-
 configure.ac                                  |    3 +-
 contribs/python/hostlist/CHANGES              |   27 +
 contribs/python/hostlist/Makefile.am          |   12 +-
 contribs/python/hostlist/Makefile.in          |   10 +-
 contribs/python/hostlist/PKG-INFO             |   10 +-
 contribs/python/hostlist/README               |   38 +-
 contribs/python/hostlist/hostlist.py          |  106 +-
 contribs/python/hostlist/python-hostlist.spec |   49 +
 contribs/python/hostlist/setup.py             |   13 +-
 doc/html/accounting.shtml                     |   21 +-
 doc/html/bluegene.shtml                       |   14 +-
 doc/html/download.shtml                       |   31 +-
 doc/html/faq.shtml                            |   73 +-
 doc/html/programmer_guide.shtml               |    7 +-
 doc/html/sched_policy.shtml                   |    9 +-
 doc/html/team.shtml                           |    5 +-
 doc/man/Makefile.am                           |   36 +
 doc/man/Makefile.in                           |   36 +
 doc/man/man1/sacct.1                          |   51 +-
 doc/man/man1/sacctmgr.1                       |  597 ++-
 doc/man/man1/salloc.1                         |   14 +-
 doc/man/man1/sbatch.1                         |   17 +-
 doc/man/man1/squeue.1                         |    6 +-
 doc/man/man1/sreport.1                        |  106 +-
 doc/man/man1/srun.1                           |   18 +-
 doc/man/man3/slurm_allocate_resources.3       |  196 +-
 .../man3/slurm_allocate_resources_blocking.3  |    1 +
 doc/man/man3/slurm_allocation_lookup_lite.3   |    1 +
 .../man3/slurm_allocation_msg_thr_create.3    |    1 +
 .../man3/slurm_allocation_msg_thr_destroy.3   |    1 +
 doc/man/man3/slurm_checkpoint_task_complete.3 |    1 +
 doc/man/man3/slurm_clear_trigger.3            |    1 +
 .../slurm_free_job_alloc_info_response_msg.3  |    1 +
 doc/man/man3/slurm_free_job_info_msg.3        |   13 +-
 doc/man/man3/slurm_free_node_info_msg.3       |    1 +
 doc/man/man3/slurm_free_partition_info_msg.3  |    1 +
 doc/man/man3/slurm_free_slurmd_status.3       |    1 +
 doc/man/man3/slurm_free_trigger_msg.3         |    1 +
 doc/man/man3/slurm_get_checkpoint_file_path.3 |    1 +
 doc/man/man3/slurm_get_triggers.3             |    1 +
 doc/man/man3/slurm_job_step_layout_free.3     |    1 +
 doc/man/man3/slurm_job_step_layout_get.3      |    1 +
 doc/man/man3/slurm_kill_job.3                 |   48 +-
 doc/man/man3/slurm_load_slurmd_status.3       |    1 +
 doc/man/man3/slurm_notify_job.3               |    1 +
 doc/man/man3/slurm_ping.3                     |    1 +
 doc/man/man3/slurm_print_slurmd_status.3      |    1 +
 doc/man/man3/slurm_read_hostfile.3            |    1 +
 doc/man/man3/slurm_set_debug_level.3          |    1 +
 doc/man/man3/slurm_set_trigger.3              |    1 +
 doc/man/man3/slurm_signal_job.3               |    1 +
 doc/man/man3/slurm_signal_job_step.3          |    1 +
 doc/man/man3/slurm_slurmd_status.3            |   67 +
 doc/man/man3/slurm_sprint_job_info.3          |    1 +
 doc/man/man3/slurm_sprint_job_step_info.3     |    1 +
 doc/man/man3/slurm_sprint_node_table.3        |    1 +
 doc/man/man3/slurm_sprint_partition_info.3    |    1 +
 doc/man/man3/slurm_step_ctx_create.3          |   40 +-
 doc/man/man3/slurm_step_ctx_create_no_alloc.3 |    1 +
 .../slurm_step_ctx_daemon_per_node_hack.3     |    1 +
 doc/man/man3/slurm_step_ctx_params_t_init.3   |    1 +
 doc/man/man3/slurm_step_launch.3              |   14 +-
 doc/man/man3/slurm_step_launch_fwd_signal.3   |    1 +
 doc/man/man3/slurm_terminate_job.3            |    1 +
 doc/man/man3/slurm_terminate_job_step.3       |    1 +
 doc/man/man3/slurm_trigger.3                  |    0
 doc/man/man5/bluegene.conf.5                  |    3 +
 doc/man/man5/slurm.conf.5                     |  153 +-
 doc/man/man5/slurmdbd.conf.5                  |   50 +-
 doc/man/man5/wiki.conf.5                      |    3 +
 etc/init.d.slurmdbd                           |   64 +-
 slurm.spec                                    |   10 +-
 slurm/slurm.h.in                              |    2 +
 src/api/allocate.c                            |    4 +-
 src/api/pmi_server.c                          |    4 +-
 src/api/step_ctx.c                            |   15 +-
 src/api/step_ctx.h                            |    3 +-
 src/api/step_launch.c                         |   31 +-
 src/api/step_launch.h                         |    3 +-
 src/common/assoc_mgr.c                        |  788 +++-
 src/common/assoc_mgr.h                        |   32 +-
 src/common/env.c                              |   33 +-
 src/common/jobacct_common.c                   |   37 +-
 src/common/jobacct_common.h                   |   23 +-
 src/common/log.c                              |   68 +-
 src/common/node_select.c                      |  183 +-
 src/common/node_select.h                      |   14 +-
 src/common/parse_time.c                       |   31 +-
 src/common/parse_time.h                       |    2 +-
 src/common/plugstack.c                        |    9 +-
 src/common/print_fields.c                     |   33 +-
 src/common/print_fields.h                     |    1 +
 src/common/read_config.c                      |   18 +-
 src/common/read_config.h                      |    8 +-
 src/common/slurm_accounting_storage.c         | 3499 +++++++++++----
 src/common/slurm_accounting_storage.h         |  350 +-
 src/common/slurm_errno.c                      |    2 +-
 src/common/slurm_jobcomp.c                    |    2 -
 src/common/slurm_jobcomp.h                    |    2 -
 src/common/slurm_protocol_api.c               |   26 +-
 src/common/slurm_protocol_api.h               |   15 +-
 src/common/slurm_protocol_defs.c              |   91 +-
 src/common/slurm_protocol_defs.h              |   13 +-
 src/common/slurm_protocol_pack.c              |   27 +-
 src/common/slurmdbd_defs.c                    |  600 ++-
 src/common/slurmdbd_defs.h                    |  229 +-
 src/common/xstring.c                          |   42 +
 src/common/xstring.h                          |    8 +
 src/database/Makefile.am                      |   17 -
 src/database/Makefile.in                      |   60 +-
 src/database/base64.c                         |  199 -
 src/database/base64.h                         |   74 -
 src/database/gold_interface.c                 |  623 ---
 src/database/gold_interface.h                 |  170 -
 src/database/mysql_common.c                   |   49 +-
 src/plugins/accounting_storage/Makefile.am    |    2 +-
 src/plugins/accounting_storage/Makefile.in    |    2 +-
 .../filetxt/accounting_storage_filetxt.c      |   22 +-
 .../accounting_storage/gold/Makefile.am       |   31 -
 .../accounting_storage/gold/Makefile.in       |  575 ---
 .../gold/accounting_storage_gold.c            | 3246 --------------
 .../mysql/accounting_storage_mysql.c          | 3936 ++++++++++++-----
 .../mysql/mysql_jobacct_process.c             |   52 +-
 .../accounting_storage/mysql/mysql_rollup.c   |   25 +-
 .../none/accounting_storage_none.c            |   12 +-
 .../pgsql/accounting_storage_pgsql.c          |  156 +-
 .../pgsql/pgsql_jobacct_process.c             |   33 +-
 .../slurmdbd/accounting_storage_slurmdbd.c    |  309 +-
 src/plugins/jobcomp/filetxt/jobcomp_filetxt.c |   37 +-
 src/plugins/jobcomp/mysql/jobcomp_mysql.c     |  135 +-
 .../jobcomp/mysql/mysql_jobcomp_process.c     |    2 -
 .../jobcomp/mysql/mysql_jobcomp_process.h     |    2 -
 src/plugins/jobcomp/pgsql/jobcomp_pgsql.c     |  134 +-
 .../jobcomp/pgsql/pgsql_jobcomp_process.c     |    3 -
 src/plugins/jobcomp/script/jobcomp_script.c   |   49 +-
 src/plugins/sched/backfill/backfill.c         |  110 +-
 src/plugins/sched/backfill/backfill_wrapper.c |   11 +-
 src/plugins/sched/wiki/msg.c                  |    1 +
 src/plugins/sched/wiki2/job_modify.c          |    2 +-
 src/plugins/sched/wiki2/msg.c                 |    1 +
 .../block_allocator/block_allocator.c         | 1165 ++---
 .../select/bluegene/plugin/bg_job_place.c     |   51 +-
 .../select/bluegene/plugin/bg_job_run.c       |   20 +-
 .../bluegene/plugin/bg_record_functions.h     |    6 +-
 src/plugins/select/bluegene/plugin/bluegene.c |    3 +-
 .../select/bluegene/plugin/dynamic_block.c    |    2 +-
 src/plugins/select/cons_res/select_cons_res.c |    8 +-
 src/plugins/select/linear/select_linear.c     |    6 +-
 src/sacct/options.c                           |   33 +-
 src/sacct/print.c                             |  111 +-
 src/sacct/sacct.c                             |    4 +-
 src/sacct/sacct.h                             |    5 +-
 src/sacctmgr/account_functions.c              |  878 ++--
 src/sacctmgr/association_functions.c          |  461 +-
 src/sacctmgr/cluster_functions.c              |  469 +-
 src/sacctmgr/common.c                         |  533 ++-
 src/sacctmgr/file_functions.c                 |  938 ++--
 src/sacctmgr/qos_functions.c                  |  537 ++-
 src/sacctmgr/sacctmgr.c                       |   94 +-
 src/sacctmgr/sacctmgr.h                       |   11 +-
 src/sacctmgr/txn_functions.c                  |  121 +-
 src/sacctmgr/user_functions.c                 |  912 ++--
 src/salloc/opt.c                              |   51 +-
 src/salloc/salloc.c                           |   19 +-
 src/sbatch/opt.c                              |   14 +-
 src/sbatch/sbatch.c                           |    5 +-
 src/scontrol/update_job.c                     |    2 +-
 src/slurmctld/acct_policy.c                   |  202 +-
 src/slurmctld/acct_policy.h                   |    5 +
 src/slurmctld/agent.c                         |    2 +-
 src/slurmctld/controller.c                    |   67 +-
 src/slurmctld/job_mgr.c                       |  469 +-
 src/slurmctld/job_scheduler.c                 |  352 +-
 src/slurmctld/job_scheduler.h                 |   11 +-
 src/slurmctld/node_mgr.c                      |   85 +-
 src/slurmctld/node_scheduler.c                |  314 +-
 src/slurmctld/partition_mgr.c                 |    9 +-
 src/slurmctld/power_save.c                    |    8 +-
 src/slurmctld/proc_req.c                      |   55 +-
 src/slurmctld/proc_req.h                      |    1 +
 src/slurmctld/read_config.c                   |    4 +-
 src/slurmctld/slurmctld.h                     |   19 +-
 src/slurmctld/step_mgr.c                      |   13 +-
 src/slurmd/slurmd/req.c                       |    8 +-
 src/slurmd/slurmd/slurmd.c                    |    8 +-
 src/slurmd/slurmstepd/mgr.c                   |   41 +-
 src/slurmd/slurmstepd/req.c                   |   17 +-
 src/slurmd/slurmstepd/slurmstepd_job.c        |    8 +-
 src/slurmdbd/proc_req.c                       | 1046 +++--
 src/slurmdbd/proc_req.h                       |   13 +-
 src/slurmdbd/rpc_mgr.c                        |   45 +-
 src/slurmdbd/rpc_mgr.h                        |    3 +-
 src/slurmdbd/slurmdbd.c                       |   10 +-
 src/smap/configure_functions.c                |   95 +-
 src/sreport/cluster_reports.c                 |  815 +++-
 src/sreport/cluster_reports.h                 |    2 +
 src/sreport/common.c                          |  184 +-
 src/sreport/job_reports.c                     |  254 +-
 src/sreport/sreport.c                         |   94 +-
 src/sreport/sreport.h                         |   46 +
 src/sreport/user_reports.c                    |  221 +-
 src/srun/allocate.c                           |   19 +-
 src/srun/opt.c                                |   17 +-
 src/srun/opt.h                                |    6 +-
 src/srun/srun.c                               |   26 +-
 src/sview/job_info.c                          |    2 +-
 testsuite/expect/README                       |    5 +-
 testsuite/expect/globals                      |   51 +
 testsuite/expect/test1.24                     |    2 +-
 testsuite/expect/test1.26                     |   51 +-
 testsuite/expect/test1.37                     |   73 +
 testsuite/expect/test1.47                     |  104 +
 testsuite/expect/test15.12                    |    2 +-
 testsuite/expect/test17.12                    |    2 +-
 testsuite/expect/test21.10                    |  164 +-
 testsuite/expect/test21.11                    |   12 +-
 testsuite/expect/test21.12                    |   15 +-
 testsuite/expect/test21.13                    |   13 +-
 testsuite/expect/test21.14                    |   17 +-
 testsuite/expect/test21.15                    |   10 +-
 testsuite/expect/test21.16                    |   14 +-
 testsuite/expect/test21.17                    |    9 +-
 testsuite/expect/test21.18                    |   13 +-
 testsuite/expect/test21.19                    |   13 +-
 testsuite/expect/test21.20                    | 1147 +++++
 testsuite/expect/test21.5                     |   67 +-
 testsuite/expect/test21.6                     |  137 +-
 testsuite/expect/test21.7                     |   81 +-
 testsuite/expect/test21.8                     |  135 +-
 testsuite/expect/test21.9                     |  135 +-
 testsuite/expect/test7.11                     |    6 +-
 testsuite/expect/test7.11.prog.c              |    9 +
 testsuite/expect/test7.7                      |    2 +-
 testsuite/expect/test7.7.prog.c               |   94 +-
 testsuite/expect/test7.8                      |    2 +-
 testsuite/expect/test9.8                      |   23 +-
 242 files changed, 20026 insertions(+), 11966 deletions(-)
 create mode 100644 contribs/python/hostlist/CHANGES
 create mode 100644 contribs/python/hostlist/python-hostlist.spec
 create mode 100644 doc/man/man3/slurm_allocate_resources_blocking.3
 create mode 100644 doc/man/man3/slurm_allocation_lookup_lite.3
 create mode 100644 doc/man/man3/slurm_allocation_msg_thr_create.3
 create mode 100644 doc/man/man3/slurm_allocation_msg_thr_destroy.3
 create mode 100644 doc/man/man3/slurm_checkpoint_task_complete.3
 create mode 100644 doc/man/man3/slurm_clear_trigger.3
 create mode 100644 doc/man/man3/slurm_free_job_alloc_info_response_msg.3
 create mode 100644 doc/man/man3/slurm_free_node_info_msg.3
 create mode 100644 doc/man/man3/slurm_free_partition_info_msg.3
 create mode 100644 doc/man/man3/slurm_free_slurmd_status.3
 create mode 100644 doc/man/man3/slurm_free_trigger_msg.3
 create mode 100644 doc/man/man3/slurm_get_checkpoint_file_path.3
 create mode 100644 doc/man/man3/slurm_get_triggers.3
 create mode 100644 doc/man/man3/slurm_job_step_layout_free.3
 create mode 100644 doc/man/man3/slurm_job_step_layout_get.3
 create mode 100644 doc/man/man3/slurm_load_slurmd_status.3
 create mode 100644 doc/man/man3/slurm_notify_job.3
 create mode 100644 doc/man/man3/slurm_ping.3
 create mode 100644 doc/man/man3/slurm_print_slurmd_status.3
 create mode 100644 doc/man/man3/slurm_read_hostfile.3
 create mode 100644 doc/man/man3/slurm_set_debug_level.3
 create mode 100644 doc/man/man3/slurm_set_trigger.3
 create mode 100644 doc/man/man3/slurm_signal_job.3
 create mode 100644 doc/man/man3/slurm_signal_job_step.3
 create mode 100644 doc/man/man3/slurm_slurmd_status.3
 create mode 100644 doc/man/man3/slurm_sprint_job_info.3
 create mode 100644 doc/man/man3/slurm_sprint_job_step_info.3
 create mode 100644 doc/man/man3/slurm_sprint_node_table.3
 create mode 100644 doc/man/man3/slurm_sprint_partition_info.3
 create mode 100644 doc/man/man3/slurm_step_ctx_create_no_alloc.3
 create mode 100644 doc/man/man3/slurm_step_ctx_daemon_per_node_hack.3
 create mode 100644 doc/man/man3/slurm_step_ctx_params_t_init.3
 create mode 100644 doc/man/man3/slurm_step_launch_fwd_signal.3
 create mode 100644 doc/man/man3/slurm_terminate_job.3
 create mode 100644 doc/man/man3/slurm_terminate_job_step.3
 create mode 100644 doc/man/man3/slurm_trigger.3
 delete mode 100644 src/database/base64.c
 delete mode 100644 src/database/base64.h
 delete mode 100644 src/database/gold_interface.c
 delete mode 100644 src/database/gold_interface.h
 delete mode 100644 src/plugins/accounting_storage/gold/Makefile.am
 delete mode 100644 src/plugins/accounting_storage/gold/Makefile.in
 delete mode 100644 src/plugins/accounting_storage/gold/accounting_storage_gold.c
 create mode 100755 testsuite/expect/test1.37
 create mode 100755 testsuite/expect/test1.47
 create mode 100755 testsuite/expect/test21.20

diff --git a/META b/META
index c8a6dd3fd..9af55c73d 100644
--- a/META
+++ b/META
@@ -3,9 +3,9 @@
   Api_revision:  0
   Major:         1
   Meta:          1
-  Micro:         8
+  Micro:         9
   Minor:         3
   Name:          slurm
   Release:       1
   Release_tags:  dist
-  Version:       1.3.8
+  Version:       1.3.9
diff --git a/NEWS b/NEWS
index 2d5dcedbf..75fdbd9b6 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,80 @@
 This file describes changes in recent versions of SLURM. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in SLURM 1.3.10
+=========================
+
+
+* Changes in SLURM 1.3.9
+========================
+ -- Fix jobs being cancelled by ctrl-C to have correct cancelled state in 
+    accounting.
+ -- Slurmdbd will only cache user data, making for faster start up
+ -- Improved support for job steps in FRONT_END systems
+ -- Added support to dump and load association information in the controller
+    on start up if slurmdbd is unresponsive
+ -- BLUEGENE - Added support for sched/backfill plugin
+ -- sched/backfill modified to initiate multiple jobs per cycle.
+ -- Increase buffer size in srun to hold task list expressions. Critical 
+    for jobs with 16k tasks or more.
+ -- Added support for eligible jobs and downed nodes to be sent to accounting
+    from the controller the first time accounting is turned on.
+ -- Correct srun logic to support --tasks-per-node option without task count.
+ -- Logic in place to handle multiple versions of RPCs within the slurmdbd. 
+    THE SLURMDBD MUST BE UPGRADED TO THIS VERSION BEFORE UPGRADING THE 
+    SLURMCTLD OR THEY WILL NOT TALK.  
+    Older versions of the slurmctld will continue to talk to the new slurmdbd.
+ -- Add support for new job dependency type: singleton. Only one job from a 
+    given user with a given name will execute with this dependency type.
+    From Matthieu Hautreux, CEA.
+ -- Updated contribs/python/hostlist to version 1.3: See "CHANGES" file in
+    that directory for details. From Kent Engstrom, NSC.
+ -- Add SLURM_JOB_NAME environment variable for jobs submitted using sbatch.
+    In order to prevent the job steps from all having the same name as the 
+    batch job that spawned them, the SLURM_JOB_NAME environment variable is
+    ignored when setting the name of a job step from within an existing 
+    resource allocation.
+ -- For use with sched/wiki2 (Moab only), set salloc's default shell based 
+    upon the user that the job runs as rather than the user submitting the job 
+    (user root).
+ -- Fix to sched/backfill when job specifies no time limit and the partition
+    time limit is INFINITE.
+ -- Validate a job's constraints (node features) at job submit or modification 
+    time. Major re-write of resource allocation logic to support more complex
+    job feature requests.
+ -- For sched/backfill, correct logic to support job constraint specification
+    (e.g. node features).
+ -- Correct power save logic to avoid trying to wake a DOWN node. From Matthieu
+    Hautreux, CEA.
+ -- Cancel a job step when one of its nodes goes DOWN based upon the job 
+    step's --no-kill option; by default the step is killed (previously the 
+    job step remained running even without the --no-kill option).
+ -- Fix bug in logic to remove whitespace from plugstack.conf.
+ -- Add new configuration parameter SallocDefaultCommand to control what 
+    shell salloc launches by default.
+ -- When enforcing PrivateData configuration parameter, failures return 
+    "Access/permission denied" rather than "Invalid user id".
+ -- From sbatch and srun, if the --dependency option is specified then set 
+    the environment variable SLURM_JOB_DEPENDENCY to the same value.
+ -- In plugin jobcomp/filetxt, use ISO8601 formats for time by default (e.g. 
+    YYYY-MM-DDTHH:MM:SS rather than MM/DD-HH:MM:SS). This restores the default
+    behavior from Slurm version 1.2. Change the value of USE_ISO8601 in
+    src/plugins/jobcomp/filetxt/jobcomp_filetxt.c to revert the behavior.
+ -- Add support for configuration option of ReturnToService=2, which will 
+    return a DOWN node to use if the node was previously set DOWN for any reason.
+ -- Removed Gold accounting plugin.  This plugin was to be used for accounting 
+    but has not been maintained and is no longer needed.  If you are using
+    this plugin, please contact slurm-dev@llnl.gov.
+ -- When not enforcing associations but running accounting, if a user 
+    submits a job to an account that does not have an association on the 
+    cluster, the account will be changed to the default account to help 
+    avoid trash in the accounting system.  If the user's default account 
+    does not have an association on the cluster, the requested account 
+    will be used.
+ -- Add configure option "--enable-front-end" to define HAVE_FRONT_END 
+    in config.h and run slurmd only on a front end (suitable only for SLURM
+    development and testing).
+
 * Changes in SLURM 1.3.8
 ========================
  -- Added PrivateData flags for Users, Usage, and Accounts to Accounting. 
@@ -469,12 +543,20 @@ documents those changes that are of interest to users and admins.
     Moved existing digital signature logic into new plugin: crypto/openssl.
     Added new support for crypto/munge (available with GPL license).
 
+* Changes in SLURM 1.2.36
+=========================
+ -- For spank_get_item(S_JOB_ARGV) for a batch job with script input via STDIN,
+    set the argc value to 1 (rather than 2; argv[0] is still set to the path of
+    the generated script).
+
 * Changes in SLURM 1.2.35
 =========================
  -- Permit SPANK plugins to dynamically register options at runtime base upon
     configuration or other runtime checks.
  -- Add "include" keywork to SPANK plugstack.conf file to optionally include
     other configuration files or directories of configuration files.
+ -- Srun now waits indefinitely for its resource allocation to be made. It
+    previously aborted after two minutes.
 
 * Changes in SLURM 1.2.34
 =========================
@@ -3462,4 +3544,4 @@ documents those changes that are of interest to users and admins.
  -- Change directory to /tmp in slurmd if daemonizing.
  -- Logfiles are reopened on reconfigure.
  
-$Id: NEWS 14961 2008-09-03 17:38:39Z da $
+$Id: NEWS 15393 2008-10-13 21:02:25Z da $
diff --git a/auxdir/x_ac_databases.m4 b/auxdir/x_ac_databases.m4
index 776239b8e..01a3d089e 100644
--- a/auxdir/x_ac_databases.m4
+++ b/auxdir/x_ac_databases.m4
@@ -93,6 +93,7 @@ AC_DEFUN([X_AC_DATABASES],
             				AC_MSG_RESULT([MySQL (non-threaded) test program built properly.])
             				AC_SUBST(MYSQL_LIBS)
 					AC_SUBST(MYSQL_CFLAGS)
+					AC_DEFINE(MYSQL_NOT_THREAD_SAFE, 1, [Define to 1 if with non thread-safe code])
 					AC_DEFINE(HAVE_MYSQL, 1, [Define to 1 if using MySQL libaries])
 				else
 					MYSQL_CFLAGS=""
diff --git a/auxdir/x_ac_debug.m4 b/auxdir/x_ac_debug.m4
index 4d33ca307..1f5a37fbd 100644
--- a/auxdir/x_ac_debug.m4
+++ b/auxdir/x_ac_debug.m4
@@ -1,5 +1,5 @@
 ##*****************************************************************************
-#  $Id: x_ac_debug.m4 8192 2006-05-25 00:15:05Z morrone $
+#  $Id: x_ac_debug.m4 15332 2008-10-07 20:08:18Z jette $
 ##*****************************************************************************
 #  AUTHOR:
 #    Chris Dunlap <cdunlap@llnl.gov>
@@ -8,8 +8,8 @@
 #    X_AC_DEBUG
 #
 #  DESCRIPTION:
-#    Add support for the "--enable-debug" and "--enable-memory-leak-debug"
-#    configure script options.
+#    Add support for the "--enable-debug", "--enable-memory-leak-debug",
+#    and "--enable-front-end" configure script options.
 #    If debugging is enabled, CFLAGS will be prepended with the debug flags.
 #    The NDEBUG macro (used by assert) will also be set accordingly.
 #
@@ -56,5 +56,22 @@ AC_DEFUN([X_AC_DEBUG], [
   fi
   AC_MSG_RESULT([${x_ac_memory_debug=no}])
 
+  AC_MSG_CHECKING([whether to enable slurmd operation on a front-end])
+  AC_ARG_ENABLE(
+    [front-end],
+     AS_HELP_STRING(--enable-front-end, enable slurmd operation on a front-end),
+     [ case "$enableval" in
+        yes) x_ac_front_end=yes ;;
+         no) x_ac_front_end=no ;;
+          *) AC_MSG_RESULT([doh!])
+             AC_MSG_ERROR([bad value "$enableval" for --enable-front-end]) ;;
+      esac
+    ]
+  )
+  if test "$x_ac_front_end" = yes; then
+    AC_DEFINE(HAVE_FRONT_END, 1, [Define to 1 if running slurmd on front-end only])
+  fi
+  AC_MSG_RESULT([${x_ac_front_end=no}])
+
   ]
 )
diff --git a/config.h.in b/config.h.in
index a13263699..0c6cf45fe 100644
--- a/config.h.in
+++ b/config.h.in
@@ -273,6 +273,9 @@
 /* Enable multiple slurmd on one node */
 #undef MULTIPLE_SLURMD
 
+/* Define to 1 if with non thread-safe code */
+#undef MYSQL_NOT_THREAD_SAFE
+
 /* Define to 1 if you are building a production release. */
 #undef NDEBUG
 
diff --git a/configure b/configure
index c052058b6..bd46c5d6a 100755
--- a/configure
+++ b/configure
@@ -1577,6 +1577,7 @@ Optional Features:
   --enable-debug          enable debugging code for development
   --enable-memory-leak-debug
                           enable memory leak debugging code for development
+  --enable-front-end      enable slurmd operation on a front-end
   --enable-load-env-no-login
                           enable --get-user-env option to load user
                           environment without login
@@ -7119,7 +7120,7 @@ ia64-*-hpux*)
   ;;
 *-*-irix6*)
   # Find out which ABI we are using.
-  echo '#line 7122 "configure"' > conftest.$ac_ext
+  echo '#line 7123 "configure"' > conftest.$ac_ext
   if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
   (eval $ac_compile) 2>&5
   ac_status=$?
@@ -9225,11 +9226,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9228: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9229: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:9232: \$? = $ac_status" >&5
+   echo "$as_me:9233: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -9515,11 +9516,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9518: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9519: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:9522: \$? = $ac_status" >&5
+   echo "$as_me:9523: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -9619,11 +9620,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9622: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9623: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:9626: \$? = $ac_status" >&5
+   echo "$as_me:9627: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -11996,7 +11997,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 11999 "configure"
+#line 12000 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -12096,7 +12097,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 12099 "configure"
+#line 12100 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -14497,11 +14498,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:14500: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:14501: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:14504: \$? = $ac_status" >&5
+   echo "$as_me:14505: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -14601,11 +14602,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:14604: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:14605: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:14608: \$? = $ac_status" >&5
+   echo "$as_me:14609: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -16199,11 +16200,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:16202: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:16203: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:16206: \$? = $ac_status" >&5
+   echo "$as_me:16207: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -16303,11 +16304,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:16306: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:16307: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:16310: \$? = $ac_status" >&5
+   echo "$as_me:16311: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -18523,11 +18524,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18526: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:18527: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:18530: \$? = $ac_status" >&5
+   echo "$as_me:18531: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -18813,11 +18814,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18816: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:18817: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:18820: \$? = $ac_status" >&5
+   echo "$as_me:18821: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -18917,11 +18918,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18920: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:18921: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:18924: \$? = $ac_status" >&5
+   echo "$as_me:18925: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -25632,6 +25633,11 @@ echo "${ECHO_T}MySQL (non-threaded) test program built properly." >&6; }
 
 
 
+cat >>confdefs.h <<\_ACEOF
+#define MYSQL_NOT_THREAD_SAFE 1
+_ACEOF
+
+
 cat >>confdefs.h <<\_ACEOF
 #define HAVE_MYSQL 1
 _ACEOF
@@ -25989,6 +25995,33 @@ _ACEOF
   { echo "$as_me:$LINENO: result: ${x_ac_memory_debug=no}" >&5
 echo "${ECHO_T}${x_ac_memory_debug=no}" >&6; }
 
+  { echo "$as_me:$LINENO: checking whether to enable slurmd operation on a front-end" >&5
+echo $ECHO_N "checking whether to enable slurmd operation on a front-end... $ECHO_C" >&6; }
+  # Check whether --enable-front-end was given.
+if test "${enable_front_end+set}" = set; then
+  enableval=$enable_front_end;  case "$enableval" in
+        yes) x_ac_front_end=yes ;;
+         no) x_ac_front_end=no ;;
+          *) { echo "$as_me:$LINENO: result: doh!" >&5
+echo "${ECHO_T}doh!" >&6; }
+             { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-front-end" >&5
+echo "$as_me: error: bad value \"$enableval\" for --enable-front-end" >&2;}
+   { (exit 1); exit 1; }; } ;;
+      esac
+
+
+fi
+
+  if test "$x_ac_front_end" = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_FRONT_END 1
+_ACEOF
+
+  fi
+  { echo "$as_me:$LINENO: result: ${x_ac_front_end=no}" >&5
+echo "${ECHO_T}${x_ac_front_end=no}" >&6; }
+
 
 
  if test "x$ac_debug" = "xtrue"; then
@@ -27196,7 +27229,7 @@ _ACEOF
 
 
 
-ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/python/Makefile contribs/python/hostlist/Makefile contribs/python/hostlist/test/Makefile src/Makefile src/api/Makefile src/common/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/srun/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/gold/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/gang/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/linear/Makefile src/plugins/select/cons_res/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/common/Makefile testsuite/slurm_unit/slurmctld/Makefile testsuite/slurm_unit/slurmd/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile"
+ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/python/Makefile contribs/python/hostlist/Makefile contribs/python/hostlist/test/Makefile src/Makefile src/api/Makefile src/common/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/srun/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/gang/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/linear/Makefile src/plugins/select/cons_res/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/common/Makefile testsuite/slurm_unit/slurmctld/Makefile testsuite/slurm_unit/slurmd/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile"
 
 
 cat >confcache <<\_ACEOF
@@ -27952,7 +27985,6 @@ do
     "src/plugins/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/Makefile" ;;
     "src/plugins/accounting_storage/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/Makefile" ;;
     "src/plugins/accounting_storage/filetxt/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/filetxt/Makefile" ;;
-    "src/plugins/accounting_storage/gold/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/gold/Makefile" ;;
     "src/plugins/accounting_storage/mysql/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/mysql/Makefile" ;;
     "src/plugins/accounting_storage/pgsql/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/pgsql/Makefile" ;;
     "src/plugins/accounting_storage/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/none/Makefile" ;;
diff --git a/configure.ac b/configure.ac
index cddb33cde..3c84ab00f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,4 @@
-# $Id: configure.ac 14645 2008-07-28 20:32:12Z jette $
+# $Id: configure.ac 15324 2008-10-07 00:16:53Z da $
 # This file is to be processed with autoconf to generate a configure script
 
 dnl Prologue
@@ -309,7 +309,6 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/Makefile
 		 src/plugins/accounting_storage/Makefile
 		 src/plugins/accounting_storage/filetxt/Makefile
-		 src/plugins/accounting_storage/gold/Makefile
 		 src/plugins/accounting_storage/mysql/Makefile
 		 src/plugins/accounting_storage/pgsql/Makefile
 		 src/plugins/accounting_storage/none/Makefile
diff --git a/contribs/python/hostlist/CHANGES b/contribs/python/hostlist/CHANGES
new file mode 100644
index 000000000..292217818
--- /dev/null
+++ b/contribs/python/hostlist/CHANGES
@@ -0,0 +1,27 @@
+Version 1.3 (2008-09-30)
+
+    Add -s/--separator, -p/--prepend, -a/--append and --version
+    options contributed by Pär Andersson at NSC.
+
+    Let -e be the short form of the --expand option (-w is now
+    deprecated).
+
+    Add a manual page for hostlist(1).
+
+Version 1.2 (2008-09-18)
+
+    Add "--prefix /usr" in the installation script of the spec file
+    (needed on SUSE Linux where the default is /usr/local).
+
+Version 1.1 (2008-09-17)
+
+    Move the command line utility to a separate 'hostlist' command.
+
+    Provide a python-hostlist.spec file to build RPM packages.
+    Inspired by a contribution by Dr. Holger Obermaier at
+    Rechenzentrum, Universität Karlsruhe.
+
+Version 1.0 (2008-07-25)
+
+    Initial version.
+
diff --git a/contribs/python/hostlist/Makefile.am b/contribs/python/hostlist/Makefile.am
index 244712630..72676f515 100644
--- a/contribs/python/hostlist/Makefile.am
+++ b/contribs/python/hostlist/Makefile.am
@@ -1,8 +1,10 @@
 SUBDIRS = test
 
-EXTRA_DIST = 		\
-	COPYING		\
-	hostlist.py	\
-	PKG-INFO	\
-	README		\
+EXTRA_DIST = 			\
+	CHANGES			\
+	COPYING			\
+	hostlist.py		\
+	PKG-INFO		\
+	python-hostlist.spec	\
+	README			\
 	setup.py
diff --git a/contribs/python/hostlist/Makefile.in b/contribs/python/hostlist/Makefile.in
index 064125ec9..64645d4e9 100644
--- a/contribs/python/hostlist/Makefile.in
+++ b/contribs/python/hostlist/Makefile.in
@@ -248,10 +248,12 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 SUBDIRS = test
 EXTRA_DIST = \
-	COPYING		\
-	hostlist.py	\
-	PKG-INFO	\
-	README		\
+	CHANGES			\
+	COPYING			\
+	hostlist.py		\
+	PKG-INFO		\
+	python-hostlist.spec	\
+	README			\
 	setup.py
 
 all: all-recursive
diff --git a/contribs/python/hostlist/PKG-INFO b/contribs/python/hostlist/PKG-INFO
index 79b0649c9..cf6353023 100644
--- a/contribs/python/hostlist/PKG-INFO
+++ b/contribs/python/hostlist/PKG-INFO
@@ -1,10 +1,16 @@
 Metadata-Version: 1.0
 Name: python-hostlist
-Version: 1.0
+Version: 1.3
 Summary: Python module for hostlist handling
 Home-page: http://www.nsc.liu.se/~kent/python-hostlist/
 Author: Kent Engström
 Author-email: kent@nsc.liu.se
 License: GPL2+
-Description: The hostlist.py module knows how to expand and collect LLNL hostlist expressions.
+Description: The hostlist.py module knows how to expand and collect hostlist expressions.
 Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: System :: Clustering
+Classifier: Topic :: System :: Systems Administration
diff --git a/contribs/python/hostlist/README b/contribs/python/hostlist/README
index 03fa29342..933cd164b 100644
--- a/contribs/python/hostlist/README
+++ b/contribs/python/hostlist/README
@@ -1,5 +1,5 @@
-The Python module hostlist.py knows how to expand and collect
-LLNL hostlist expressions. Example:
+The Python module hostlist.py knows how to expand and collect hostlist
+expressions. Example:
 
   % python
   Python 2.5.1 (r251:54863, Jul 10 2008, 17:24:48) 
@@ -17,14 +17,14 @@ LLNL hostlist expressions. Example:
 Bad hostlists or hostnames will result in the hostlist.BadHostlist
 exception being raised.
 
-If you invoke hostlist.py from the command line, it doubles as a utility
-for doing set operations on hostlists. Example:
+The 'hostlist' command is provided to expand/collect hostlists and
+perform set operations on them. Example:
 
-  % ./hostlist.py n[1-10] n[5-20]
+  % hostlist n[1-10] n[5-20]
   n[1-20]
-  % ./hostlist.py --difference n[1-10] n[5-20]
+  % hostlist --difference n[1-10] n[5-20]
   n[1-4]
-  % ./hostlist.py --expand --intersection n[1-10] n[5-20] 
+  % hostlist --expand --intersection n[1-10] n[5-20] 
   n5
   n6
   n7
@@ -32,12 +32,28 @@ for doing set operations on hostlists. Example:
   n9
   n10
 
-Install by running
+Install directly by running
 
   python setup.py build   (as yourself)
   python setup.py install (as root) 
 
-or just copy the hostlist.py file to an appropriate place.
+or just copy the hostlist.py, hostlist and hostlist.1 files to appropriate places.
 
-If you have questions, suggestions, bug reports or patches, please send them
-to kent@nsc.liu.se.
+Build RPM packages by running:
+
+  rpmbuild -ta python-hostlist-VERSION.tar.gz
+
+If you do not have the tar archive, build RPM packages by running:
+
+  python setup.py sdist
+  cp dist/python-hostlist-VERSION.tar.gz ~/rpmbuild/SOURCES
+  rpmbuild -ba python-hostlist.spec  
+
+You will find new releases at:
+
+  http://www.nsc.liu.se/~kent/python-hostlist/
+
+If you have questions, suggestions, bug reports or patches, please
+send them to:
+
+  kent@nsc.liu.se.
diff --git a/contribs/python/hostlist/hostlist.py b/contribs/python/hostlist/hostlist.py
index 9e6a2c594..f2c2adac1 100755
--- a/contribs/python/hostlist/hostlist.py
+++ b/contribs/python/hostlist/hostlist.py
@@ -1,12 +1,11 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 #
-# Hostlist library and utility
+# Hostlist library
 #
-# Version 1.0
-#
-# Copyright (C) 2008 Kent Engström <kent@nsc.liu.se> and
-#                    Thomas Bellman <bellman@nsc.liu.se>,
+# Copyright (C) 2008 Kent Engström <kent@nsc.liu.se>,
+#                    Thomas Bellman <bellman@nsc.liu.se> and
+#                    Pär Andersson <paran@nsc.liu.se>,
 #                    National Supercomputer Centre
 # 
 # This program is free software; you can redistribute it and/or modify
@@ -24,8 +23,18 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 # 02110-1301, USA.
 
-# WARNING: The behaviour in corner cases have not been compared for
-# compatibility with pdsh/dshbak/SLURM et al.
+"""Handle hostlist expressions.
+
+This module provides operations to expand and collect hostlist
+expressions.
+
+The hostlist expression syntax is the same as in several programs
+developed at LLNL (https://computing.llnl.gov/linux/). However, in
+corner cases the behaviour of this module has not been compared for
+compatibility with pdsh/dshbak/SLURM et al.
+"""
+
+__version__ = "1.3"
 
 import re
 import itertools
@@ -39,7 +48,7 @@ MAX_SIZE = 100000
 # Hostlist expansion
 
 def expand_hostlist(hostlist, allow_duplicates=False, sort=False):
-    """Expand a Livermore hostlist string to a Python list.
+    """Expand a hostlist expression string to a Python list.
 
     Exemple: expand_hostlist("n[9-11],d[01-02]") ==> 
              ['n9', 'n10', 'n11', 'd01', 'd02']
@@ -344,83 +353,10 @@ def numeric_sort_key(x):
 
     
 #
-# The library stuff ends here. Now lets do something useful
-# when called from the command line too :-)
+# Keep this part to tell users where the command line interface went
 #
 
 if __name__ == '__main__':
-    import optparse
-    import sys
-    import operator
-    import os
-
-    def func_union(args):
-        return reduce(operator.or_, args)
-
-    def func_intersection(args):
-        return reduce(operator.and_, args)
-
-    def func_difference(args):
-        return reduce(operator.sub, args)
-
-    def func_xor(args):
-        return reduce(operator.xor, args)
-
-    op = optparse.OptionParser(usage="usage: %prog [options] {hostlist arguments}")
-    op.add_option("-u", "--union",
-                  action="store_const", dest="func", const=func_union,
-                  default=func_union,
-                  help="compute the union of the hostlist arguments (default)")
-    op.add_option("-i", "--intersection",
-                  action="store_const", dest="func", const=func_intersection,
-                  help="compute the intersection of the hostlist arguments")
-    op.add_option("-d", "--difference",
-                  action="store_const", dest="func", const=func_difference,
-                  help="compute the difference between the first hostlist argument and the rest")
-    op.add_option("-x", "--symmetric-difference",
-                  action="store_const", dest="func", const=func_xor,
-                  help="compute the symmetric difference between the first hostlist argument and the rest")
-    op.add_option("-w", "--expand",
-                  action="store_true",
-                  help="output the results as an expanded list")
-    op.add_option("-c", "--collapse",
-                  action="store_false", dest="expand",
-                  help="output the results as a hostlist expression (default)")
-    op.add_option("-n", "--count",
-                  action="store_true",
-                  help="output the number of hosts instead of a hostlist")
-    (opts, args) = op.parse_args()
-
-    func = opts.func
-
-    func_args  = []
-
-    try:
-        for a in args:
-            if a == "-":
-                for a in sys.stdin.read().split():
-                    func_args.append(set(expand_hostlist(a)))
-            else:
-                func_args.append(set(expand_hostlist(a)))
-    except BadHostlist, e:
-        sys.stderr.write("Bad hostlist ``%s'' encountered: %s\n"
-                         % ((a,) + e.args))
-        sys.exit(os.EX_DATAERR)
-
-    if not func_args:
-        op.print_help()
-        sys.exit(os.EX_USAGE)
-
-    res = func(func_args)
-
-    if opts.count:
-        print len(res)
-    elif opts.expand:
-        for host in numerically_sorted(res):
-            print host
-    else:
-        try:
-            print collect_hostlist(res)
-        except BadHostlist, e:
-            sys.stderr.write("Bad hostname encountered: %s\n" % e.args)
-            sys.exit(os.EX_DATAERR)
+    import os, sys
+    sys.stderr.write("The command line utility has been moved to a separate 'hostlist' program.\n")
+    sys.exit(os.EX_USAGE)
diff --git a/contribs/python/hostlist/python-hostlist.spec b/contribs/python/hostlist/python-hostlist.spec
new file mode 100644
index 000000000..7f0efc5f3
--- /dev/null
+++ b/contribs/python/hostlist/python-hostlist.spec
@@ -0,0 +1,49 @@
+%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
+
+Name:           python-hostlist
+Version:        1.3
+Release:        1
+Summary:        Python module for hostlist handling
+Vendor:         NSC
+
+Group:          Development/Languages
+License:        GPL2+
+URL:            http://www.nsc.liu.se/~kent/python-hostlist/
+Source0:        http://www.nsc.liu.se/~kent/python-hostlist/%{name}-%{version}.tar.gz
+BuildRoot:      %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+
+BuildArch:      noarch
+BuildRequires:  python-devel
+
+%description
+The hostlist.py module knows how to expand and collect hostlist
+expressions. The package also includes the 'hostlist' binary which can
+be used to collect/expand hostlists and perform set operations on
+them.
+
+%prep
+%setup -q
+
+
+%build
+%{__python} setup.py build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+%{__python} setup.py install -O1 --skip-build --prefix /usr --root $RPM_BUILD_ROOT
+
+ 
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+
+%files
+%defattr(-,root,root,-)
+%doc README
+%doc COPYING
+%doc CHANGES
+%{python_sitelib}/*
+/usr/bin/hostlist
+/usr/share/man/man1/hostlist.1.gz
+%changelog
diff --git a/contribs/python/hostlist/setup.py b/contribs/python/hostlist/setup.py
index 87acae2f0..0afd28097 100644
--- a/contribs/python/hostlist/setup.py
+++ b/contribs/python/hostlist/setup.py
@@ -3,12 +3,21 @@
 from distutils.core import setup
 
 setup(name         = "python-hostlist",
-      version      = "1.0", # Change comment in hostlist.py too!
+      version      = "1.3", # Change in hostlist{,.py,.1}, python-hostlist.spec too!
       description  = "Python module for hostlist handling",
-      long_description = "The hostlist.py module knows how to expand and collect LLNL hostlist expressions.",
+      long_description = "The hostlist.py module knows how to expand and collect hostlist expressions.",
       author       = "Kent Engström",
       author_email = "kent@nsc.liu.se",
       url          = "http://www.nsc.liu.se/~kent/python-hostlist/",
       license      = "GPL2+",
+      classifiers  = ['Development Status :: 5 - Production/Stable',
+                      'Intended Audience :: Science/Research',
+                      'Intended Audience :: System Administrators',
+                      'License :: OSI Approved :: GNU General Public License (GPL)',
+                      'Topic :: System :: Clustering',
+                      'Topic :: System :: Systems Administration',
+                      ],
       py_modules   = ["hostlist"],
+      scripts      = ["hostlist"],
+      data_files   = [("share/man/man1", ["hostlist.1"])],
       )
diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml
index 5764cb0da..4f67cb524 100644
--- a/doc/html/accounting.shtml
+++ b/doc/html/accounting.shtml
@@ -175,9 +175,17 @@ the database with.</li>
 <p>While the SlurmDBD will work with a flat text file for recording
 job completions and such this configuration will not allow
 "associations" between a user and account. A database allows such
-a configuration. MySQL is the preferred database, PostgreSQL is
-supported for job and step accounting only. The infrastructure for
-associations is not yet supported. To enable this database support
+a configuration. 
+
+<p>
+<b>MySQL is the preferred database; PostgreSQL is
+supported for job and step accounting only.</b> The infrastructure for
+using PostgreSQL with associations is not yet supported, meaning
+sacctmgr will not work correctly.  If interested in adding this
+capability for PostgreSQL please email slurm-dev@lists.llnl.gov.
+
+<p>
+To enable this database support
 one only needs to have the development package for the database they
 wish to use on the system. The slurm configure script uses
 mysql_config and pg-config to find out the information it needs
@@ -214,7 +222,12 @@ is not in the database, then set this to "1".
 user names and optional partition name.)
 Without AccountingStorageEnforce being set (the default behavior) 
 jobs will be executed based upon policies configured in SLURM on each cluster. 
-This option will prevent users from accessing invalid accounts.</li>
+This option will prevent users from accessing invalid accounts.  
+Setting this to "2" will also cause association limits to be
+enforced.  When set to "1" association limits will not be
+enforced.  It is a good idea to run in mode "1" when running a
+scheduler on top of SLURM, such as Moab, that does not update
+its per-association limits in real time.</li>
 
 <li><b>AccountingStorageHost</b>: The name or address of the host where 
 SlurmDBD executes</li>
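
As an illustration of the options described above (a sketch only; the host name is a placeholder and not part of the distributed documentation), the relevant slurm.conf entries might look like:

    # Store accounting data through SlurmDBD; enforce valid associations and their limits.
    AccountingStorageType=accounting_storage/slurmdbd
    AccountingStorageHost=dbhost.example.com
    AccountingStorageEnforce=2
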
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index 8a5cc3166..0136108ee 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -272,11 +272,17 @@ etc.).  Sample prolog and epilog scripts follow. </p>
 /usr/sbin/slurm_epilog
 </pre>
 
-<p>Since jobs with different geometries or other characteristics do not interfere 
-with each other's scheduling, backfill scheduling is not presently meaningful.
+<p>Since jobs with different geometries or other characteristics might not 
+interfere with each other, scheduling is somewhat different on a BlueGene 
+system than typical clusters.
 SLURM's builtin scheduler on BlueGene will sort pending jobs and then attempt 
-to schedule all of them in priority order. 
+to schedule <b>all</b> of them in priority order. 
 This essentially functions as if there is a separate queue for each job size.
+SLURM's backfill scheduler on BlueGene will enforce FIFO (first-in first-out)
+scheduling with backfill (lower priority jobs will start early if doing so 
+will not impact the expected initiation time of a higher priority job). 
+As on other systems, effective backfill relies upon users setting reasonable
+job time limits.
 Note that SLURM does support different partitions with an assortment of 
 different scheduling parameters.
 For example, SLURM can have defined a partition for full system jobs that 
@@ -631,6 +637,6 @@ scheduling logic, etc. </p>
  
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 23 April 2008</p>
+<p style="text-align:center;">Last modified 9 September 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/download.shtml b/doc/html/download.shtml
index 0b5b03bd1..61bd587d3 100644
--- a/doc/html/download.shtml
+++ b/doc/html/download.shtml
@@ -22,7 +22,34 @@ https://sourceforge.net/projects/slurm/</a><br>
 SLURM's PAM module has also been packaged for 
 <a href="http://packages.debian.org/src:libpam-slurm">Debian</a> and 
 <a href="http://packages.ubuntu.com/src:libpam-slurm">Ubuntu</a>
-(both named <i>libpam-slurm</i>).</p>
+(both named <i>libpam-slurm</i>).</li>
+
+<li><b>SPANK Plugins</b><br>
+SPANK provides a very generic interface for stackable plug-ins which
+may be used to dynamically modify the job launch code in SLURM. SPANK
+plugins may be built without access to SLURM source code. They need
+only be compiled against SLURM's spank.h header file, added to the
+SPANK config file plugstack.conf, and they will be loaded at runtime
+during the next job launch. Thus, the SPANK infrastructure provides
+administrators and other developers a low cost, low effort ability to
+dynamically modify the runtime behavior of SLURM job launch.
+An assortment of SPANK plugins is available from<br>
+<a href="http://code.google.com/p/slurm-spank-plugins/">
+http://code.google.com/p/slurm-spank-plugins/</a>.<br>
+The current source for the plugins can be checked out of the subversion
+repository with the following command:<br>
+<i>svn checkout http://slurm-spank-plugins.googlecode.com/svn/trunk/ slurm-plugins</i></li>
+
+<li><b>I/O Watchdog</b><br>
+A facility for monitoring user applications, most notably parallel jobs, 
+for <i>hangs</i> which typically have a side-effect of ceasing all write 
+activity. This facility attempts to monitor all write activity of an 
+application and trigger a set of user-defined actions when write activity 
+has ceased for a configurable period of time. A SPANK plugin is provided
+for use with SLURM. See the README and man page in that package for more
+details. Download the latest source from:<br>
+<a href="http://io-watchdog.googlecode.com/files/io-watchdog-0.6.tar.bz2">
+http://io-watchdog.googlecode.com/files/io-watchdog-0.6.tar.bz2</a></li>
 
 </ul>
 <h1>Related Software</h1>
@@ -104,6 +131,6 @@ Portable Linux Processor Affinity (PLPA)</a></li>
 
 </ul>
 
-<p style="text-align:center;">Last modified 25 July 2008</p>
+<p style="text-align:center;">Last modified 30 September 2008</p>
 
 <!--#include virtual="footer.txt"-->
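
For context on the SPANK plugins listed above: once a plugin has been compiled against spank.h, enabling it is typically a single line in plugstack.conf. The plugin path and argument below are hypothetical examples, not part of this patch:

    # plugstack.conf:  optional|required   <path to plugin>           [arguments]
    optional             /usr/lib64/slurm/renice.so                   min_prio=-10
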
diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml
index b9711d0b1..2063ed8f2 100644
--- a/doc/html/faq.shtml
+++ b/doc/html/faq.shtml
@@ -85,8 +85,10 @@ SLURM RPM?</li>
 <li><a href="#slurmdbd">Why should I use the slurmdbd instead of the
 regular database plugins?</li>
 <li><a href="#debug">How can I build SLURM with debugging symbols?</li>
-<li><a href="#state_preserve">How can I easily preserve drained node information 
-between major SLURM updates?</li>
+<li><a href="#state_preserve">How can I easily preserve drained node 
+information between major SLURM updates?</li>
+<li><a href="#health_check">Why doesn't the <i>HealthCheckProgram</i>
+execute on DOWN nodes?</li>
 </ol>
 
 <h2>For Users</h2>
@@ -735,25 +737,7 @@ of the node that it is supposed to serve on the execute line.</li>
 for this due to it's improved support for multiple slurmd daemons.
 See the
 <a href="programmer_guide.shtml#multiple_slurmd_support">Programmers Guide</a>
-for more details about configuring multiple slurmd support.
-<p>In order to emulate a really large cluster, it can be more 
-convenient to use a single <i>slurmd</i> daemon. 
-That daemon will not be able to launch many tasks, but can 
-suffice for developing or testing scheduling software.
-<ol>
-<li>Execute the <i>configure</i> program with your normal options.</li>
-<li>Add the line "<i>#define HAVE_FRONT_END 1</i>" to the resulting
-<i>config.h</i> file.</li>
-<li>Build and install SLURM in the usual manner.</li>
-<li>In <i>slurm.conf</i> define the desired node names (arbitrary
-names used only by SLURM) as <i>NodeName</i> along with the actual
-address of the <b>one</b> physical node in <i>NodeHostname</i>.
-Up to 64k nodes can be configured in this virtual cluster.</li>
-<li>Start your <i>slurmctld</i> and one <i>slurmd</i> daemon.</li>
-<li>Create job allocations as desired, but <b>do not run job steps
-with more than a couple of tasks.</b> Doing so may result in the 
-<i>slurmd</i> daemon exhausting its memory and failing.</li>
-</ol>
+for more details about configuring multiple slurmd support.</p>
 
 <p>In order to emulate a really large cluster, it can be more 
 convenient to use a single <i>slurmd</i> daemon. 
@@ -765,9 +749,9 @@ Doing so may result in the <i>slurmd</i> daemon exhausting its
 memory and failing. 
 <b>Use this method with caution.</b>
 <ol>
-<li>Execute the <i>configure</i> program with your normal options.</li>
-<li>Append the line "<i>#define HAVE_FRONT_END 1</i>" to the resulting
-<i>config.h</i> file.</li>
+<li>Execute the <i>configure</i> program with your normal options
+plus <i>--enable-front-end</i> (this will define HAVE_FRONT_END in
+the resulting <i>config.h</i> file).</li>
 <li>Build and install SLURM in the usual manner.</li>
 <li>In <i>slurm.conf</i> define the desired node names (arbitrary
 names used only by SLURM) as <i>NodeName</i> along with the actual
@@ -781,9 +765,9 @@ Be sure to use the "-c" option when switch from this mode too.</li>
 <li>Create job allocations as desired, but do not run job steps
 with more than a couple of tasks.</li>
 </ol>
+
 <pre>
-$ ./configure --enable-debug --prefix=... --sysconfdir=...
-$ echo "#define HAVE_FRONT_END 1" >>config.h
+$ ./configure --enable-debug --enable-front-end --prefix=... --sysconfdir=...
 $ make install
 $ grep NodeHostName slurm.conf
 <i>NodeName=dummy[1-1200] NodeHostName=localhost NodeAddr=127.0.0.1</i>
@@ -890,7 +874,12 @@ about these options.
 clocks on the cluster?</b></a><br>
 In general, yes. Having inconsistent clocks may cause nodes to 
 be unusable. SLURM log files should contain references to 
-expired credentials.
+expired credentials. For example:
+<pre>
+error: Munge decode failed: Expired credential
+ENCODED: Wed May 12 12:34:56 2008
+DECODED: Wed May 12 12:01:12 2008
+</pre>
 
 <p><a name="cred_invalid"><b>21. Why are &quot;Invalid job credential&quot; 
 errors generated?</b></a><br>
@@ -999,8 +988,36 @@ in a form that can be executed later to restore state.
 sinfo -t drain -h -o "scontrol update nodename='%N' state=drain reason='%E'"
 </pre>
 
+<p><a name="health_check"><b>31. Why doesn't the <i>HealthCheckProgram</i>
+execute on DOWN nodes?</b></a><br>
+Hierarchical communications are used for sending this message. If there
+are DOWN nodes in the communications hierarchy, messages will need to 
+be re-routed. This limits SLURM's ability to tightly synchronize the 
+execution of the <i>HealthCheckProgram</i> across the cluster, which
+could adversely impact performance of parallel applications. 
+The use of CRON or node startup scripts may be better suited to ensure
+that <i>HealthCheckProgram</i> gets executed on nodes that are DOWN
+in SLURM. If you still want to have SLURM try to execute 
+<i>HealthCheckProgram</i> on DOWN nodes, apply the following patch:
+<pre>
+Index: src/slurmctld/ping_nodes.c
+===================================================================
+--- src/slurmctld/ping_nodes.c  (revision 15166)
++++ src/slurmctld/ping_nodes.c  (working copy)
+@@ -283,9 +283,6 @@
+                node_ptr   = &node_record_table_ptr[i];
+                base_state = node_ptr->node_state & NODE_STATE_BASE;
+
+-               if (base_state == NODE_STATE_DOWN)
+-                       continue;
+-
+ #ifdef HAVE_FRONT_END          /* Operate only on front-end */
+                if (i > 0)
+                        continue;
+</pre>
+
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 2 September 2008</p>
+<p style="text-align:center;">Last modified 7 October 2008</p>
 
 <!--#include virtual="footer.txt"-->
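
A sketch of the cron alternative suggested in FAQ entry 31 above; the script path and interval are illustrative assumptions, not part of the SLURM distribution:

    # /etc/cron.d/node-health: run the health check script every 10 minutes,
    # independent of the node's state in SLURM.
    */10 * * * *  root  /usr/sbin/node_health_check.sh
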
diff --git a/doc/html/programmer_guide.shtml b/doc/html/programmer_guide.shtml
index 2ee1d221f..74d13cfd2 100644
--- a/doc/html/programmer_guide.shtml
+++ b/doc/html/programmer_guide.shtml
@@ -170,8 +170,9 @@ Update <b>Makefile.am</b> files as needed then execute
 
 <h2>Tricks of the Trade</h2>
 <h3>HAVE_FRONT_END</h3>
-<p>You can make a single node appear to SLURM as a Linux cluster by manually 
-defining <b>HAVE_FRONT_END</b> to have a non-zero value in the file <b>config.h</b>.
+<p>You can make a single node appear to SLURM as a Linux cluster by running
+<i>configure</i> with the <i>--enable-front-end</i> option. This 
+defines <b>HAVE_FRONT_END</b> with a non-zero value in the file <b>config.h</b>.
 All (fake) nodes should be defined in the <b>slurm.conf</b> file.
 These nodes should be configured with a single <b>NodeAddr</b> value
 indicating the node on which single <span class="commandline">slurmd</span> daemon 
@@ -229,6 +230,6 @@ host1> slurmd -N foo21
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 20 May 2008</p>
+<p style="text-align:center;">Last modified 7 October 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/sched_policy.shtml b/doc/html/sched_policy.shtml
index b7a23b530..53aed3fd9 100644
--- a/doc/html/sched_policy.shtml
+++ b/doc/html/sched_policy.shtml
@@ -40,7 +40,14 @@ In order to enforce scheduling policy, set the value of
 <b>AccountingStorageEnforce</b> to "1" in <b>slurm.conf</b>.
 This prevents users from running any jobs without an valid
 <i>association</i> record in the database and enforces scheduling 
-policy limits that have been configured.</p>
+policy limits that have been configured.  
+In order to enforce association limits along with scheduling policy,
+set the value of <b>AccountingStorageEnforce</b> to "2"
+in <b>slurm.conf</b>.  When set to "1" association limits will not be
+enforced.  It is a good idea to run in mode "1" when running a
+scheduler on top of SLURM, such as Moab, that does not update
+its per-association limits in real time.
+</p>
 
 <h2>Tools</h2>
 
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index f82230ee6..e4c8934f2 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -29,6 +29,7 @@ and a host of others.
 <li>Chuck Clouston (Bull)</li>
 <li>Chris Dunlap (LLNL)</li>
 <li>Joey Ekstrom (LLNL/Bringham Young University)</li>
+<li>Josh England (TGS Management Corporation)</li>
 <li>Kent Engstr&ouml;m  (National Supercomputer Centre, Sweden)</li>
 <li>Jim Garlick (LLNL)</li>
 <li>Didier Gazen (Laboratoire d'Aerologie, France)</li>
@@ -59,11 +60,13 @@ Networking, Italy)</li>
 <li>Federico Sacerdoti (D.E. Shaw)<li>
 <li>Jeff Squyres (LAM MPI)</li>
 <li>Prashanth Tamraparni (HP, India)</li>
+<li>Adam Todorski (Rensselaer Polytechnic Institute)</li>
 <li>Kevin Tew (LLNL/Bringham Young University)</li>
+<li>Tim Wickberg (Rensselaer Polytechnic Institute)</li>
 <li>Jay Windley (Linux NetworX)</li>
 <li>Anne-Marie Wunderlin (Bull)</li>
 </ul>
 
-<p style="text-align:center;">Last modified 28 July 2008</p>
+<p style="text-align:center;">Last modified 10 October 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index ecc4cfe72..800536c24 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -24,7 +24,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_hostlist_destroy.3 \
 	man3/slurm_hostlist_shift.3 \
 	man3/slurm_allocate_resources.3 \
+	man3/slurm_allocate_resources_blocking.3 \
 	man3/slurm_allocation_lookup.3 \
+	man3/slurm_allocation_lookup_lite.3 \
+	man3/slurm_allocation_msg_thr_create.3 \
+	man3/slurm_allocation_msg_thr_destroy.3 \
 	man3/slurm_api_version.3 \
 	man3/slurm_checkpoint_able.3 \
 	man3/slurm_checkpoint_complete.3 \
@@ -34,27 +38,38 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_checkpoint_error.3 \
 	man3/slurm_checkpoint_failed.3 \
 	man3/slurm_checkpoint_restart.3 \
+	man3/slurm_checkpoint_task_complete.3 \
 	man3/slurm_checkpoint_vacate.3 \
+	man3/slurm_clear_trigger.3 \
 	man3/slurm_complete_job.3 \
 	man3/slurm_complete_job_step.3 \
 	man3/slurm_confirm_allocation.3 \
 	man3/slurm_free_ctl_conf.3 \
 	man3/slurm_free_job_info_msg.3 \
+	man3/slurm_free_job_alloc_info_response_msg.3 \
 	man3/slurm_free_job_step_create_response_msg.3 \
 	man3/slurm_free_job_step_info_response_msg.3 \
 	man3/slurm_free_node_info.3 \
+	man3/slurm_free_node_info_msg.3 \
 	man3/slurm_free_partition_info.3 \
+	man3/slurm_free_partition_info_msg.3 \
 	man3/slurm_free_resource_allocation_response_msg.3 \
+	man3/slurm_free_slurmd_status.3 \
 	man3/slurm_free_submit_response_response_msg.3 \
+	man3/slurm_free_trigger_msg.3 \
+	man3/slurm_get_checkpoint_file_path.3 \
 	man3/slurm_get_end_time.3 \
 	man3/slurm_get_errno.3 \
 	man3/slurm_get_job_steps.3 \
 	man3/slurm_get_rem_time.3 \
 	man3/slurm_get_select_jobinfo.3 \
+	man3/slurm_get_triggers.3 \
 	man3/slurm_init_job_desc_msg.3 \
 	man3/slurm_init_part_desc_msg.3 \
 	man3/slurm_job_step_create.3 \
 	man3/slurm_job_step_launch_t_init.3 \
+	man3/slurm_job_step_layout_get.3 \
+	man3/slurm_job_step_layout_free.3 \
 	man3/slurm_job_will_run.3 \
 	man3/slurm_jobinfo_ctx_get.3 \
 	man3/slurm_kill_job.3 \
@@ -64,8 +79,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_load_jobs.3 \
 	man3/slurm_load_node.3 \
 	man3/slurm_load_partitions.3 \
+	man3/slurm_load_slurmd_status.3 \
+	man3/slurm_notify_job.3 \
 	man3/slurm_perror.3 \
 	man3/slurm_pid2jobid.3 \
+	man3/slurm_ping.3 \
 	man3/slurm_print_ctl_conf.3 \
 	man3/slurm_print_job_info.3 \
 	man3/slurm_print_job_info_msg.3 \
@@ -75,20 +93,38 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_print_node_table.3 \
 	man3/slurm_print_partition_info.3 \
 	man3/slurm_print_partition_info_msg.3 \
+	man3/slurm_print_slurmd_status.3 \
+	man3/slurm_read_hostfile.3 \
 	man3/slurm_reconfigure.3 \
 	man3/slurm_resume.3 \
 	man3/slurm_requeue.3 \
+	man3/slurm_set_debug_level.3 \
+	man3/slurm_set_trigger.3 \
 	man3/slurm_shutdown.3 \
+	man3/slurm_signal_job.3 \
+	man3/slurm_signal_job_step.3 \
+	man3/slurm_slurmd_status.3 \
+	man3/slurm_sprint_job_info.3 \
+	man3/slurm_sprint_job_step_info.3 \
+	man3/slurm_sprint_node_table.3 \
+	man3/slurm_sprint_partition_info.3 \
 	man3/slurm_step_ctx_create.3 \
+	man3/slurm_step_ctx_create_no_alloc.3 \
+	man3/slurm_step_ctx_daemon_per_node_hack.3 \
 	man3/slurm_step_ctx_destroy.3 \
+	man3/slurm_step_ctx_params_t_init.3 \
 	man3/slurm_step_ctx_get.3 \
 	man3/slurm_step_launch.3 \
+	man3/slurm_step_launch_fwd_signal.3 \
 	man3/slurm_step_launch_abort.3 \
 	man3/slurm_step_launch_wait_finish.3 \
 	man3/slurm_step_launch_wait_start.3 \
 	man3/slurm_strerror.3 \
 	man3/slurm_submit_batch_job.3 \
 	man3/slurm_suspend.3 \
+	man3/slurm_terminate_job.3 \
+	man3/slurm_terminate_job_step.3 \
+	man3/slurm_trigger.3 \
 	man3/slurm_update_job.3 \
 	man3/slurm_update_node.3 \
 	man3/slurm_update_partition.3
diff --git a/doc/man/Makefile.in b/doc/man/Makefile.in
index 7aaf763cc..5ae613a5e 100644
--- a/doc/man/Makefile.in
+++ b/doc/man/Makefile.in
@@ -265,7 +265,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_hostlist_destroy.3 \
 	man3/slurm_hostlist_shift.3 \
 	man3/slurm_allocate_resources.3 \
+	man3/slurm_allocate_resources_blocking.3 \
 	man3/slurm_allocation_lookup.3 \
+	man3/slurm_allocation_lookup_lite.3 \
+	man3/slurm_allocation_msg_thr_create.3 \
+	man3/slurm_allocation_msg_thr_destroy.3 \
 	man3/slurm_api_version.3 \
 	man3/slurm_checkpoint_able.3 \
 	man3/slurm_checkpoint_complete.3 \
@@ -275,27 +279,38 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_checkpoint_error.3 \
 	man3/slurm_checkpoint_failed.3 \
 	man3/slurm_checkpoint_restart.3 \
+	man3/slurm_checkpoint_task_complete.3 \
 	man3/slurm_checkpoint_vacate.3 \
+	man3/slurm_clear_trigger.3 \
 	man3/slurm_complete_job.3 \
 	man3/slurm_complete_job_step.3 \
 	man3/slurm_confirm_allocation.3 \
 	man3/slurm_free_ctl_conf.3 \
 	man3/slurm_free_job_info_msg.3 \
+	man3/slurm_free_job_alloc_info_response_msg.3 \
 	man3/slurm_free_job_step_create_response_msg.3 \
 	man3/slurm_free_job_step_info_response_msg.3 \
 	man3/slurm_free_node_info.3 \
+	man3/slurm_free_node_info_msg.3 \
 	man3/slurm_free_partition_info.3 \
+	man3/slurm_free_partition_info_msg.3 \
 	man3/slurm_free_resource_allocation_response_msg.3 \
+	man3/slurm_free_slurmd_status.3 \
 	man3/slurm_free_submit_response_response_msg.3 \
+	man3/slurm_free_trigger_msg.3 \
+	man3/slurm_get_checkpoint_file_path.3 \
 	man3/slurm_get_end_time.3 \
 	man3/slurm_get_errno.3 \
 	man3/slurm_get_job_steps.3 \
 	man3/slurm_get_rem_time.3 \
 	man3/slurm_get_select_jobinfo.3 \
+	man3/slurm_get_triggers.3 \
 	man3/slurm_init_job_desc_msg.3 \
 	man3/slurm_init_part_desc_msg.3 \
 	man3/slurm_job_step_create.3 \
 	man3/slurm_job_step_launch_t_init.3 \
+	man3/slurm_job_step_layout_get.3 \
+	man3/slurm_job_step_layout_free.3 \
 	man3/slurm_job_will_run.3 \
 	man3/slurm_jobinfo_ctx_get.3 \
 	man3/slurm_kill_job.3 \
@@ -305,8 +320,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_load_jobs.3 \
 	man3/slurm_load_node.3 \
 	man3/slurm_load_partitions.3 \
+	man3/slurm_load_slurmd_status.3 \
+	man3/slurm_notify_job.3 \
 	man3/slurm_perror.3 \
 	man3/slurm_pid2jobid.3 \
+	man3/slurm_ping.3 \
 	man3/slurm_print_ctl_conf.3 \
 	man3/slurm_print_job_info.3 \
 	man3/slurm_print_job_info_msg.3 \
@@ -316,20 +334,38 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_print_node_table.3 \
 	man3/slurm_print_partition_info.3 \
 	man3/slurm_print_partition_info_msg.3 \
+	man3/slurm_print_slurmd_status.3 \
+	man3/slurm_read_hostfile.3 \
 	man3/slurm_reconfigure.3 \
 	man3/slurm_resume.3 \
 	man3/slurm_requeue.3 \
+	man3/slurm_set_debug_level.3 \
+	man3/slurm_set_trigger.3 \
 	man3/slurm_shutdown.3 \
+	man3/slurm_signal_job.3 \
+	man3/slurm_signal_job_step.3 \
+	man3/slurm_slurmd_status.3 \
+	man3/slurm_sprint_job_info.3 \
+	man3/slurm_sprint_job_step_info.3 \
+	man3/slurm_sprint_node_table.3 \
+	man3/slurm_sprint_partition_info.3 \
 	man3/slurm_step_ctx_create.3 \
+	man3/slurm_step_ctx_create_no_alloc.3 \
+	man3/slurm_step_ctx_daemon_per_node_hack.3 \
 	man3/slurm_step_ctx_destroy.3 \
+	man3/slurm_step_ctx_params_t_init.3 \
 	man3/slurm_step_ctx_get.3 \
 	man3/slurm_step_launch.3 \
+	man3/slurm_step_launch_fwd_signal.3 \
 	man3/slurm_step_launch_abort.3 \
 	man3/slurm_step_launch_wait_finish.3 \
 	man3/slurm_step_launch_wait_start.3 \
 	man3/slurm_strerror.3 \
 	man3/slurm_submit_batch_job.3 \
 	man3/slurm_suspend.3 \
+	man3/slurm_terminate_job.3 \
+	man3/slurm_terminate_job_step.3 \
+	man3/slurm_trigger.3 \
 	man3/slurm_update_job.3 \
 	man3/slurm_update_node.3 \
 	man3/slurm_update_partition.3
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 83ebea364..ca501a074 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -80,6 +80,14 @@ This option has no effect when the
 \f3\-\-\-dump\fP 
 option is also specified.
 
+.TP 
+\f3\-C \fP\f2cluster_list\fP\f3,\fP  \f3\-\-cluster\fP\f3=\fP\f2cluster_list\fP
+Displays the statistics only for the jobs started on the clusters specified by
+the \f2cluster_list\fP operand, which is a comma\-separated list of clusters.
+Space characters are not allowed in the \f2cluster_list\fP.  Use \-1 for
+all clusters; the default is the cluster on which the sacct
+command is executed\&.
+
 .TP 
 \f3\-d \fP\f3,\fP \f3\-\-dump\fP
 Displays (dumps) the raw data records.
@@ -162,7 +170,7 @@ COMPLETED  3.0
 The default value for the 
 \f2field_list\fP 
 operand is 
-\f3"jobid,partition,process,ncpus,status,exitcode"\fP\c
+\f3"jobid,jobname,partition,ncpus,state,exitcode"\fP\c
 \&.
 .IP 
 This option has no effect when the 
@@ -178,17 +186,20 @@ command to read job accounting data from the named
 instead of the current SLURM job accounting log file.
 
 .TP 
-\f3\-g \fP\f2gid\fP\f3,\fP  \f3\-\-gid\fP\f3=\fP\f2gid\fP
-Displays the statistics only for the jobs started with GID 
-\f2gid\fP\c
-\&. 
+\f3\-g \fP\f2gid_list\fP\f3,\fP  \f3\-\-gid\fP\f3=\fP\f2gid_list\fP
+Displays the statistics only for the jobs started with the GIDs
+specified by the \f2gid_list\fP operand, which is a comma\-separated
+list of gids.  Space characters are not allowed in the \f2gid_list\fP.
+Default is no restrictions.  This is virtually the same as the \-\-group
+option\&.
 
 .TP 
-\f3\-g \fP\f2group\fP\f3,\fP  \f3\-\-group\fP\f3=\fP\f2group\fP
-Displays the statistics only for the jobs started by users in the
-group
-\f2group\fP\c
-\&. 
+\f3\-g \fP\f2group_list\fP\f3,\fP  \f3\-\-group\fP\f3=\fP\f2group_list\fP
+Displays the statistics only for the jobs started with the groups
+specified by the \f2group_list\fP operand, which is a comma\-separated
+list of groups.  Space characters are not allowed in the \f2group_list\fP.
+Default is no restrictions.  This is virtually the same as the \-\-gid option\&.
+
 .TP 
 \f3\-h \fP\f3,\fP \f3\-\-help\fP
 Displays a general help message.
@@ -374,16 +385,20 @@ Displays only the cumulative statistics for each job.
 Intermediate steps are displayed by default.
 
 .TP 
-\f3\-u \fP\f2uid\fP\f3,\fP  \f3\-\-uid\fP\f3=\fP\f2uid\fP
-Displays the statistics only for the jobs started by the user whose UID is 
-\f2uid\fP\c
-\&.
+\f3\-u \fP\f2uid_list\fP\f3,\fP  \f3\-\-uid\fP\f3=\fP\f2uid_list\fP
+Displays the statistics only for the jobs started by the users
+specified by the \f2uid_list\fP operand, which is a comma\-separated list of uids.
+Space characters are not allowed in the \f2uid_list\fP.
+Use \-1 for all uids; the default is the current uid.  If run as user root,
+the default is all users.  This is virtually the same as the \-\-user option\&.
 
 .TP 
-\f3\-u \fP\f2user\fP\f3,\fP  \f3\-\-user\fP\f3=\fP\f2user\fP
-Displays the statistics only for the jobs started by user
-\f2user\fP\c
-\&.
+\f3\-u \fP\f2user_list\fP\f3,\fP  \f3\-\-user\fP\f3=\fP\f2user_list\fP
+Displays the statistics only for the jobs started by the users
+specified by the \f2user_list\fP operand, which is a comma\-separated list of users.
+Space characters are not allowed in the \f2user_list\fP.
+Use \-1 for all users; the default is the current user.  If run as user root,
+the default is all users.  This is virtually the same as the \-\-uid option\&.
 
 .TP 
 \f3\-\-usage\fP
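
A few invocation sketches combining the list-valued options documented above; the user, group and cluster names are placeholders:

    # Jobs started by two users on two clusters.
    sacct --user=adam,brenda --cluster=tux1,tux2

    # Jobs started by any member of the physics group on the local cluster.
    sacct --group=physics
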
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index e57ba52c1..cc6669ec9 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -1,4 +1,4 @@
-.TH SACCTMGR "1" "June 2008" "sacctmgr 1.3" "Slurm components"
+.TH SACCTMGR "1" "October 2008" "sacctmgr 1.3" "Slurm components"
 
 .SH "NAME"
 sacctmgr \- Used to view and modify Slurm account information.
@@ -13,7 +13,7 @@ being provided by \fBslurmdbd\fR (Slurm Database daemon).
 This database can serve as a central storehouse of user and 
 computer information for multiple computers at a single site.
 Slurm account information is recorded based upon four parameters
-that form what is refered to as an \fIassociation\fR. 
+that form what is referred to as an \fIassociation\fR. 
 These parameters are \fIuser\fR, \fIcluster\fR, \fIpartition\fR, and 
 \fIaccount\fR. \fIuser\fR is the login name.
 \fIcluster\fR is the name of a Slurm managed cluster as specified by 
@@ -31,7 +31,7 @@ Print a help message describing the usage of \fBssacctmgr\fR.
 This is equivalent to the \fBhelp\fR command.
 
 .TP
-\fB\-\-immediate\fR
+\fB\-i\fR, \fB\-\-immediate\fR
 commit changes immediately.
 
 .TP
@@ -93,9 +93,13 @@ Add an entity.
 Identical to the \fBadd\fR command.
 
 .TP
-\fBdelete\fR \fIENTITY\fR with \fISPECS\fR
+\fBdelete\fR <\fIENTITY\fR> with <\fISPECS\fR>
 Delete the specified entities.
 
+.TP
+\fBdump\fR <\fIENTITY\fR> with <\fIFILENAME\fR>
+Dump cluster data to the specified file.
+
 .TP
 \fBexit\fP
 Terminate sacctmgr.
@@ -111,6 +115,10 @@ Display information about the specified entities.
 By default, all entities are displayed.
 Identical to the \fBshow\fR command.
 
+.TP
+\fBload\fR <\fIFILENAME\fR>
+Load cluster data from the specified file.
+
 .TP
 \fBmodify\fR <\fIENTITY\fR> \fbwith\fR <\fISPECS\fR> \fbset\fR <\fISPECS\fR>
 Modify an entity.
@@ -154,7 +162,7 @@ Repeat the last command.
 .TP
 \fIaccount\fP
 A bank account, typically specified at job submit time using the 
-\fI--account=\fR option.
+\fI\-\-account=\fR option.
 These may be arranged in a hierarchical fashion, for example
 accounts \fIchemistry\fR and \fIphysics\fR may be children of
 the account \fIscience\fR. 
@@ -172,7 +180,7 @@ file, used to differentiate accounts from on different machines.
 
 .TP
 \fIcoordinator\fR
-A special priviaged user usually an account manager or such that can
+A special privileged user usually an account manager or such that can
 add users or sub accounts to the account they are coordinator over.
 This should be a trusted person since they can change limits on
 account and user associations inside their realm.
@@ -183,42 +191,114 @@ Quality of Service (For use with MOAB only).
 
 .TP
 \fItransaction\fR
-List of transactions that have occured during a given time period.
+List of transactions that have occurred during a given time period.
 
 .TP
 \fIuser\fR
 The login name.
 
+
 .TP
 \fBSPECIFICATIONS FOR ACCOUNTS\fR
+
 .TP
 \fICluster\fP=<cluster>
 Specific cluster to add account to.  Default is all in system.
+
 .TP
 \fIDescription\fP=<description>
 An arbitrary string describing an account.
+
 .TP
 \fIFairshare\fP=<fairshare>
 Number used in conjunction with other accounts to determine job priority.
 To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpCPUMins\fP=<max cpu minutes> 
+Maximum number of CPU minutes running jobs are able to be allocated in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
+.TP
+\fIGrpCPUs\fP=<max cpus>
+Maximum number of CPUs running jobs are able to be allocated in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
+.TP
+\fIGrpJobs\fP=<max jobs>
+Maximum number of running jobs in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpNodes\fP=<max nodes>
+Maximum number of nodes running jobs are able to be allocated in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpSubmitJobs\fP=<max jobs>
+Maximum number of jobs which can be in a pending or running state at any time 
+in aggregate for this association and all associations which are children of 
+this association.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpWall\fP=<max wall>
+Maximum wall clock time running jobs are able to be allocated in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
-\fIMaxCPUSecs\fP=<max cpu seconds> 
-Maximum number of cpu seconds each job is able to use in this account.
+\fIMaxCPUMins\fP=<max cpu minutes> 
+Maximum number of CPU minutes each job is able to use in this account.
 This is overridden if set directly on a user. 
 Default is the cluster's limit.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
+.TP
+\fIMaxCPUs\fP=<max cpus>
+Maximum number of CPUs each job is able to use in this account.
+This is overridden if set directly on a user. 
+Default is the cluster's limit.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
 .TP
 \fIMaxJobs\fP=<max jobs>
 Maximum number of jobs each user is allowed to run at one time in this account.
 This is overridden if set directly on a user. 
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
 \fIMaxNodes\fP=<max nodes>
 Maximum number of nodes each job is able to use in this account.
 This is overridden if set directly on a user. 
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new value of \-1.
+This is a c\-node limit on BlueGene systems.
+
+.TP
+\fIMaxSubmitJobs\fP=<max jobs>
+Maximum number of jobs which this account can have in a pending or running
+state at any time.
+Default is the cluster's limit.
+To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
 \fIMaxWall\fP=<max wall>
 Maximum wall clock time each job is able to use in this account.
@@ -228,49 +308,140 @@ Default is the cluster's limit.
 <days>\-<hr>:<min>:<sec> or <days>\-<hr>.
 The value is recorded in minutes with rounding as needed.
 To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
 \fIName\fP=<name>
 The name of a bank account.
+
 .TP
 \fIOrganization\fP=<org>
 Organization to which the account belongs.
+
 .TP
 \fIParent\fP=<parent>
 Parent account of this account. Default is no parent, a top level account.
+
 .TP
-\fIQosLevel\fP=<qos>
-Quality of Service jobs are to run at for this account.  Now consisting
-of Normal, Standby, Expedite, and Exempt.
-This is overridden if set directly on a user. 
+\fIPartition\fP=<name>
+Name of SLURM partition these limits apply to.
+
+.TP
+\fIQosLevel\fP<operator><comma separated list of qos names>
+(For use with MOAB only.)
+Specify the default Quality of Service (QOS) values that jobs are able to run at
+for this account.  To get a list of valid QOS's use 'sacctmgr list qos'. 
+This value will override its parent's value and push down to its
+children as the new default.  Setting a QosLevel to '' (two single
+quotes with nothing between them) restores its default setting.  You
+can also use the operators += and \-= to add or remove certain QOS's
+from a QOS list.
+
+Valid <operator> values include:
+.RS
+.TP 5
+\fB=\fR
+Set \fIQosLevel\fP to the specified value.
+.TP
+\fB+=\fR
+Add the specified <qos> value to the current \fIQosLevel\fP.
+.TP
+\fB\-=\fR
+Remove the specified <qos> value from the current \fIQosLevel\fP.
+.RE
+
 
 .TP
 \fBSPECIFICATIONS FOR CLUSTERS\fR
+
 .TP
 \fIFairshare\fP=<fairshare>
 Number used in conjunction with other accounts to determine job priority.
 To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
-\fIName\fP=<name>
-The name of a cluster.
-This should be equal to the \fIClusterName\fR parameter in the \fIslurm.conf\fR 
-configuration file for some Slurm-managed cluster. 
+\fIGrpCPUMins\fP=<max cpu minutes> 
+Maximum number of CPU minutes running jobs are able to be allocated in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
 .TP
-\fIMaxCPUSecs\fP=<max cpu seconds> 
-Maximum number of cpu seconds each job is able to use in this account.
+\fIGrpCPUs\fP=<max cpus>
+Maximum number of CPUs running jobs are able to be allocated in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
+.TP
+\fIGrpJobs\fP=<max jobs>
+Maximum number of running jobs in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpNodes\fP=<max nodes>
+Maximum number of nodes running jobs are able to be allocated in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpSubmitJobs\fP=<max jobs>
+Maximum number of jobs which can be in a pending or running state at any time 
+in aggregate for this association and all associations which are children of 
+this association.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIGrpWall\fP=<max wall>
+Maximum wall clock time running jobs are able to be allocated in aggregate for 
+this association and all associations which are children of this association.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.TP
+\fIMaxCPUMins\fP=<max cpu minutes> 
+Maximum number of CPU minutes each job is able to use in this account.
 This is overridden if set directly on an account or user. 
 Default is no limit.
-To clear a previously set value use the modify command with a new value of \-1.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+ 
+.TP
+\fIMaxCPUs\fP=<max cpus>
+Maximum number of cpus each job is able to use in this account.
+This is overridden if set directly on an account or user. 
+Default is no limit.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
+.TP
 \fIMaxJobs\fP=<max jobs>
 Maximum number of jobs each user is allowed to run at one time in this account.
 This is overridden if set directly on an account or user. 
 Default is no limit.
 To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
 \fIMaxNodes\fP=<max nodes>
 Maximum number of nodes each job is able to use in this account.
 This is overridden if set directly on an account or user. 
 Default is no limit.
 To clear a previously set value use the modify command with a new value of \-1.
+This is a c\-node limit on BlueGene systems.
+
+.TP
+\fIMaxSubmitJobs\fP=<max jobs>
+Maximum number of jobs which this account can have in a pending or running
+state at any time.
+To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
 \fIMaxWall\fP=<max wall>
 Maximum wall clock time each job is able to use in this account.
@@ -280,21 +451,46 @@ Default is no limit.
 <days>\-<hr>:<min>:<sec> or <days>\-<hr>.
 The value is recorded in minutes with rounding as needed.
 To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
-\fIQosLevel\fP=<qos>
-Quality of Service jobs are to run at for this account.  Now consisting
-of Normal, Standby, Expedite, and Exempt.
-This is overridden if set directly on an account or user. 
+\fIName\fP=<name>
+The name of a cluster.
+This should be equal to the \fIClusterName\fR parameter in the \fIslurm.conf\fR 
+configuration file for some Slurm\-managed cluster. 
+
+.TP
+\fIQosLevel\fP<operator><comma separated list of qos names>
+(For use with MOAB only.)
+Specify the default Quality of Service (QOS) values that jobs are able to run at
+for this cluster.  To get a list of valid QOS's use 'sacctmgr list qos'. 
+This value is overridden if a child has a QOS value directly set.
+Setting a QosLevel to '' (two single quotes with nothing between them)
+restores its default setting.  You can also use the operators += and
+\-= to add or remove certain QOS's from a QOS list.
+Valid <operator> values include:
+.RS
+.TP 5
+\fB=\fR
+Set \fIQosLevel\fP to the specified value.
+.TP
+\fB+=\fR
+Add the specified <qos> value to the current \fIQosLevel\fP.
+.TP
+\fB\-=\fR
+Remove the specified <qos> value from the current \fIQosLevel\fP.
+.RE
+
 
 .TP
 \fBSPECIFICATIONS FOR COORDINATOR\fR
 .TP
-\fIAccountsfP=<comma separated list of account names>
+\fIAccounts\fP=<comma separated list of account names>
 Account name to add this user as a coordinator to.
 .TP
 \fINames\fP=<comma separated list of user names>
 Names of coordinators.
 
+
 .TP
 \fBSPECIFICATIONS FOR QOS\fR
 .TP
@@ -306,48 +502,60 @@ Names of qos.
 
 .TP
 \fBSPECIFICATIONS FOR USERS\fR
+
 .TP
 \fIAccount\fP=<account>
 Account name to add this user to.
+
 .TP
 \fIAdminLevel\fP=<level>
 Admin level of user.  Valid levels are None, Operator, and Admin.
+
 .TP
 \fICluster\fP=<cluster>
 Specific cluster to add user to the account on.  Default is all in system.
+
 .TP
 \fIDefaultAccount\fP=<account>
 Identify the default bank account name to be used for a job if none is 
 specified at submission time.
+
 .TP
 \fIFairshare\fP=<fairshare>
 Number used in conjunction with other users in the same account to
 determine job priority.
 To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
-\fIName\fP=<name>
-Name of user.
-.TP
-\fIQosLevel\fP=<qos>
-The Quality of Service jobs are to run at for this user using the
-account specified.  Now consisting of Normal, Standby, Expedite, and Exempt.
+\fIMaxCPUMins\fP=<max cpu minutes> 
+Maximum number of CPU minutes each job is able to use for this user.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
 .TP
-\fIMaxCPUSecs\fP=<max cpu seconds> 
-Maximum number of cpu seconds this user can use in each job using the
-account specified.
-To clear a previously set value use the modify command with a new value of \-1.
+\fIMaxCPUs\fP=<max cpus>
+Maximum number of CPUs each job is able to use for this user.
+Default is the account's limit.
+To clear a previously set value use the modify command with a new
+value of \-1.  (NOTE: this limit is not currently enforced in SLURM.
+You can still set this, but have to wait for future versions of SLURM
+before it is enforced.)
+
 .TP
 \fIMaxJobs\fP=<max jobs>
-Maximum number of jobs this user can run at a given time using the
-account specified.
-This is overridden if set directly on a user. 
+Maximum number of jobs this user is allowed to run at one time.
 Default is the account's limit.
 To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
 \fIMaxNodes\fP=<max nodes>
 Maximum number of nodes this user can allocate in each job using the
 account specified. 
 Default is the account's limit.
+This is a c\-node limit on BlueGene systems.
+
 .TP
 \fIMaxWall\fP=<max wall>
 Maximum wall clock time this user can use in each job using the
@@ -358,10 +566,255 @@ Default is the account's limit.
 The is recorded in minutes with rounding as needed.
 To clear a previously set value use the modify command with a new value of \-1.
 
+.TP
+\fIName\fP=<name>
+Name of user.
+
+.TP
+\fIPartition\fP=<name>
+Name of SLURM partition these limits apply to.
+
+.TP
+\fIQosLevel\fP<operator><comma separated list of qos names>
+(For use with MOAB only.)
+Specify the default Quality of Service (QOS) values that jobs are able to run at
+for this user.  To get a list of valid QOS's use 'sacctmgr list qos'. 
+This value will override its parent's value.
+Setting a QosLevel to '' (two single quotes with nothing between them)
+restores its default setting.  You can also use the operators += and
+\-= to add or remove certain QOS's from a QOS list.
+.RS
+.TP 5
+\fB=\fR
+Set \fIQosLevel\fP to the specified value.
+.TP
+\fB+=\fR
+Add the specified <qos> value to the current \fIQosLevel\fP.
+.TP
+\fB\-=\fR
+Remove the specified <qos> value from the current \fIQosLevel\fP.
+.RE
+
+
+.SH "FLAT FILE DUMP AND LOAD"
+sacctmgr has the capability to load and dump SLURM association data to and
+from a file.  This method makes it easy to add a new cluster or copy an
+existing cluster's associations into a new cluster with similar
+accounts. Each file contains SLURM association data for a single
+cluster.  Comments can be put into the file with the # character.
+Each line of information must begin with one of the four titles: \fBCluster, Parent, Account or
+User\fP. Following the title is a space, dash, space, entity value,
+then specifications. Specifications are colon separated.  If any
+value such as Organization has a space in it, surround it with
+single or double quotes.
+
+To create a file of associations one can run
+
+> sacctmgr dump tux file=tux.cfg
+.br
+(file=tux.cfg is optional)
+
+To load a previously created file you can run
+
+> sacctmgr load file=tux.cfg
+
+Other options for load are \-
+
+clean \- delete what was already there and start from scratch with this
+information.
+.br
+Cluster= \- specify a different name for the cluster than that which is
+in the file.
+
+A quick explanation of how the file works follows.
+
+Since the associations in the system follow a hierarchy, so does the
+file.  Anything that is a parent needs to be defined before any
+children.  The only exception is the understood 'root' account.  This
+is always a default for any cluster and does not need to be defined.
+
+To edit/create a file, start with a cluster line for the new cluster:
+
+\fBCluster\ \-\ cluster_name:MaxNodesPerJob=15\fP
+
+Anything included on this line will be the defaults for all
+associations on this cluster.  These options are as follows...
+.TP
+GrpCPUMins=  
+Maximum number of CPU minutes running jobs are able to
+be allocated in aggregate for this association and all associations
+which are children of this association. (NOTE: this limit is not
+currently enforced in SLURM. You can still set this, but have to wait
+for future versions of SLURM before it is enforced.)
+.TP
+GrpCPUs= 
+Maximum number of CPUs running jobs are able to be
+allocated in aggregate for this association and all associations which
+are children of this association. (NOTE: this limit is not currently
+enforced in SLURM. You can still set this, but have to wait for future
+versions of SLURM before it is enforced.)
+.TP
+GrpJobs= 
+Maximum number of running jobs in aggregate for this
+association and all associations which are children of this association.
+.TP
+GrpNodes= 
+Maximum number of nodes running jobs are able to be
+allocated in aggregate for this association and all associations which
+are children of this association.
+.TP
+GrpSubmitJobs= 
+Maximum number of jobs which can be in a pending or
+running state at any time in aggregate for this association and all
+associations which are children of this association. 
+.TP
+GrpWall= 
+Maximum wall clock time running jobs are able to be
+allocated in aggregate for this association and all associations which
+are children of this association. 
+.TP
+FairShare= 
+To be used with a scheduler like MOAB to determine priority.
+.TP
+MaxJobs= 
+Maximum number of jobs the children of this account can run.
+.TP
+MaxNodesPerJob= 
+Maximum number of nodes per job the children of this account can run.
+.TP
+MaxProcSecondsPerJob= 
+Maximum cpu seconds that jobs of this account's children can run.
+.TP
+MaxWallDurationPerJob= 
+Maximum time (not related to job size) that jobs of this account's children can run.
+.TP
+QOS= 
+Comma separated list of Quality of Service names (Defined in sacctmgr).
+.TP
+
+This is followed by the accounts you want, in this fashion...
+
+\fBParent\ \-\ root\fP (Defined by default)
+.br
+\fBAccount\ \-\ cs\fP:MaxNodesPerJob=5:MaxJobs=4:MaxProcSecondsPerJob=20:FairShare=399:MaxWallDurationPerJob=40:Description='Computer Science':Organization='LC'
+.br
+\fBParent\ \-\ cs\fP
+.br
+\fBAccount\ \-\ test\fP:MaxNodesPerJob=1:MaxJobs=1:MaxProcSecondsPerJob=1:FairShare=1:MaxWallDurationPerJob=1:Description='Test Account':Organization='Test'
+
+.TP
+Any of the options after a ':' can be left out and they can be in any order.
+If you want to add any sub accounts, just list the Parent THAT HAS ALREADY 
+BEEN CREATED before the account line in this fashion...
+.TP
+All account options are
+.TP
+Description= 
+A brief description of the account.
+.TP
+GrpCPUMins=  
+Maximum number of CPU minutes running jobs are able to
+be allocated in aggregate for this association and all associations
+which are children of this association. (NOTE: this limit is not
+currently enforced in SLURM. You can still set this, but have to wait
+for future versions of SLURM before it is enforced.)
+.TP
+GrpCPUs= 
+Maximum number of CPUs running jobs are able to be
+allocated in aggregate for this association and all associations which
+are children of this association. (NOTE: this limit is not currently
+enforced in SLURM. You can still set this, but have to wait for future
+versions of SLURM before it is enforced.)
+.TP
+GrpJobs= 
+Maximum number of running jobs in aggregate for this
+association and all associations which are children of this association.
+.TP
+GrpNodes= 
+Maximum number of nodes running jobs are able to be
+allocated in aggregate for this association and all associations which
+are children of this association.
+.TP
+GrpSubmitJobs= 
+Maximum number of jobs which can be in a pending or
+running state at any time in aggregate for this association and all
+associations which are children of this association. 
+.TP
+GrpWall= 
+Maximum wall clock time running jobs are able to be
+allocated in aggregate for this association and all associations which
+are children of this association. 
+.TP
+FairShare= 
+To be used with a scheduler like MOAB to determine priority.
+.TP
+MaxJobs= 
+Maximum number of jobs the children of this account can run.
+.TP
+MaxNodesPerJob= 
+Maximum number of nodes per job the children of this account can run.
+.TP
+MaxProcSecondsPerJob= 
+Maximum cpu seconds that jobs of this account's children can run.
+.TP
+MaxWallDurationPerJob= 
+Maximum time (not related to job size) that jobs of this account's children can run.
+.TP
+Organization= 
+Name of organization that owns this account.
+.TP
+QOS(=,+=,\-=) 
+Comma separated list of Quality of Service names (Defined in sacctmgr).
+.TP
+
+.TP
+To add users to an account, add a line like this after a Parent \- line:
+\fBParent\ \-\ test\fP
+.br
+\fBUser\ \-\ adam\fP:MaxNodesPerJob=2:MaxJobs=3:MaxProcSecondsPerJob=4:FairShare=1:MaxWallDurationPerJob=1:AdminLevel=Operator:Coordinator='test'
+
+.TP
+All user options are
+.TP
+AdminLevel=
+Type of admin this user is (Administrator, Operator)
+.br
+\fBMust be defined on the first occurrence of the user.\fP
+.TP
+Coordinator=
+Comma separated list of accounts this user is coordinator over
+.br
+\fBMust be defined on the first occurrence of the user.\fP
+.TP
+DefaultAccount=
+System\-wide default account name
+.br
+\fBMust be defined on the first occurrence of the user.\fP
+.TP
+FairShare= 
+To be used with a scheduler like MOAB to determine priority.
+.TP
+MaxJobs= 
+Maximum number of jobs this user can run.
+.TP
+MaxNodesPerJob= 
+Maximum number of nodes per job this user can run.
+.TP
+MaxProcSecondsPerJob= 
+Maximum cpu seconds this user can run per job.
+.TP
+MaxWallDurationPerJob= 
+Maximum time (not related to job size) this user can run.
+.TP
+QOS(=,+=,\-=) 
+Comma separated list of Quality of Service names (Defined in sacctmgr).
+.RE
+
+
 .SH "EXAMPLES"
 .eo
 .br
-> sacctmgr create cluster=tux
+> sacctmgr create cluster tux
 .br
 > sacctmgr create account name=science fairshare=50
 .br
@@ -369,13 +822,67 @@ To clear a previously set value use the modify command with a new value of \-1.
 .br
 > sacctmgr create account name=physics parent=science fairshare=20
 .br
-> sacctmgr create user name=adam cluster=tux account=physics \
-.br 
-  fairshare=10
+> sacctmgr create user name=adam cluster=tux account=physics fairshare=10
+.br
+> sacctmgr modify user name=adam cluster=tux account=physics set
+  maxjobs=2 maxtime=30:00
+.br
+> sacctmgr dump cluster=tux tux_data_file
+.br
+> sacctmgr load tux_data_file
+.br
+
+.br
+When modifying an object, correct placement of the key word 'set' and
+the optional key word 'where' is critical; below are examples that
+produce correct results.  As a rule of thumb, anything you put in front
+of 'set' will be used as a quantifier.  If you want to put a
+quantifier after the key word 'set', you should use the key
+word 'where'.
+.br
+
+.br
+wrong> sacctmgr modify user name=adam set fairshare=10 cluster=tux
+.br
+
+.br
+This will produce an error as the above line reads modify user adam
+set fairshare=10 and cluster=tux.  
+.br
+
+.br
+right> sacctmgr modify user name=adam cluster=tux set fairshare=10
+.br
+right> sacctmgr modify user name=adam set fairshare=10 where cluster=tux 
+.br
+
+.br
+(For use with MOAB only)
+When changing qos for something, only use the '=' operator when you want
+to explicitly set the qos to a specific value.  In most cases you will want
+to use the '+=' or '-=' operator to either add to or remove from the
+existing qos already in place.
+.br
+
+.br
+If a user already has a qos of normal,standby from a parent or it was
+explicitly set, you should use qos+=expedite to add this to the list in
+this fashion.
+.br
+
+.br
+> sacctmgr modify user name=adam set qos+=expedite
+.br
+
+.br
+If you want to add the qos expedite to only a certain
+account and/or cluster, you can do that by specifying them in the
+sacctmgr line.
+.br
+
 .br
-> sacctmgr modify user with name=adam cluster=tux account=physics \
+> sacctmgr modify user name=adam acct=this cluster=tux set qos+=expedite
 .br
-  set maxjobs=2 maxtime=30:00
 .ec
 
 .SH "COPYING"
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 58d7fc300..c4451cfaa 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -18,7 +18,10 @@ complete, salloc relinquishes the job allocation.
 
 The command may be any program the user wishes.  Some typical commands are 
 xterm, a shell script containing srun commands, and srun (see the EXAMPLES 
-section). If no command is specified, the user's default shell.
+section). If no command is specified, then the value of 
+\fBSallocDefaultCommand\fR in slurm.conf is used. If 
+\fBSallocDefaultCommand\fR is not set, then \fBsalloc\fR runs the 
+user's default shell.
 
 .SH "OPTIONS"
 .LP 
@@ -97,6 +100,11 @@ The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
 For example: \fB\-\-constraint="opteron&video"\fR or 
 \fB\-\-constraint="fast|faster"\fR.
+In the first example, only nodes having both the feature "opteron" AND
+the feature "video" will be used.
+There is no mechanism to specify that you want one node with feature
+"opteron" and another node with feature "video" in that case that no
+node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
 For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
@@ -488,6 +496,10 @@ in some failed state (non-zero exit code, node failure, timed out, etc).
 \fBafterok:job_id[:jobid...]\fR
 This job can begin execution after the specified jobs have successfully
 executed (ran to completion with non-zero exit code).
+.TP
+\fBsingleton\fR
+This job can begin execution after any previously launched jobs sharing the same
+job name and user have terminated.
 .RE
 
 .TP
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 918f78431..45290f9ce 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -90,6 +90,11 @@ The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
 For example: \fB\-\-constraint="opteron&video"\fR or 
 \fB\-\-constraint="fast|faster"\fR.
+In the first example, only nodes having both the feature "opteron" AND
+the feature "video" will be used.
+There is no mechanism to specify that you want one node with feature
+"opteron" and another node with feature "video" in that case that no
+node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
 For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
@@ -560,6 +565,10 @@ in some failed state (non-zero exit code, node failure, timed out, etc).
 \fBafterok:job_id[:jobid...]\fR
 This job can begin execution after the specified jobs have successfully
 executed (ran to completion with non-zero exit code).
+.TP
+\fBsingleton\fR
+This job can begin execution after any previously launched jobs sharing the same
+job name and user have terminated.
 .RE
 
 .TP
@@ -779,6 +788,12 @@ The select/cons_res plugin allocates individual processors
 to jobs, so this number indicates the number of processors
 on this node allocated to the job.
 .TP
+\fBSLURM_JOB_DEPENDENCY\fR
+Set to value of the \-\-dependency option.
+.TP
+\fBSLURM_JOB_NAME\fR
+Name of the job.
+.TP
 \fBSLURM_JOB_NODELIST\fR (and \fBSLURM_NODELIST\fR for backwards compatibility)
 List of nodes allocated to the job.
 .TP
@@ -849,7 +864,7 @@ host4
 .LP 
 Pass a batch script to sbatch on standard input:
 .IP 
-morrone:~$ sbatch \-N4 <<EOF
+$ sbatch \-N4 <<EOF
 .br
 > #!/bin/sh
 .br
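
The SLURM_JOB_NAME and SLURM_JOB_DEPENDENCY variables documented above are
exported into the batch job's environment, so a task can read them like any
other environment variable. A minimal illustrative sketch in C (the program
itself is hypothetical; only the variable names come from the man page text
above):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Both variables are exported by sbatch into the batch job's
	 * environment; either may be unset if the corresponding option
	 * was not used. */
	const char *name = getenv("SLURM_JOB_NAME");
	const char *dep  = getenv("SLURM_JOB_DEPENDENCY");

	printf("job name:   %s\n", name ? name : "(not set)");
	printf("dependency: %s\n", dep  ? dep  : "(not set)");
	return 0;
}
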
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 6ebadd58c..9941d0e60 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -119,8 +119,10 @@ Minimum size of temporary disk space (in MB) requested by the job.
 \fB%D\fR 
 Number of nodes allocated to the job or the minimum number of nodes 
 required by a pending job. The actual number of nodes allocated to a pending 
-job may exceed this number of the job specified a node range count or the 
-cluster contains nodes with varying processor counts.
+job may exceed this number if the job specified a node range count (e.g. 
+minimum and maximum node counts) or the job specifies a processor 
+count instead of a node count and the cluster contains nodes with varying 
+processor counts.
 .TP
 \fB%e\fR 
 Time at which the job ended or is expected to end (based upon its time limit)
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index e07f8c0cf..aa2ef4857 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -1,4 +1,4 @@
-.TH SREPORT "1" "May 2008" "sreport 1.3" "Slurm components"
+.TH SREPORT "1" "October 2008" "sreport 1.3" "Slurm components"
 
 .SH "NAME"
 sreport \- Used to generate reports from the slurm accounting data.
@@ -18,6 +18,7 @@ being provided by the \fBslurmdbd\fR (Slurm Database daemon).
 \fB\-a\fR, \fB\-\-all_clusters\fR
 Use all clusters instead of only cluster from where the command was run.
 
+.TP
 \fB\-h\fR, \fB\-\-help\fR
 Print a help message describing the usage of \fBsreport\fR.
 This is equivalent to the \fBhelp\fR command.
@@ -26,11 +27,6 @@ This is equivalent to the \fBhelp\fR command.
 \fB\-n\fR, \fB\-\-no_header\fR
 Don't display header when listing results.
 
-.TP
-\fB\-q\fR, \fB\-\-quiet\fR
-Print no warning or informational messages, only error messages.
-This is equivalent to the \fBquiet\fR command.
-
 .TP
 \fB\-p\fR, \fB\-\-parsable\fR
 Output will be '|' delimited with a '|' at the end.
@@ -39,6 +35,18 @@ Output will be '|' delimited with a '|' at the end.
 \fB\-P\fR, \fB\-\-parsable2\fR
 Output will be '|' delimited without a '|' at the end.
 
+.TP
+\fB\-q\fR, \fB\-\-quiet\fR
+Print no warning or informational messages, only error messages.
+This is equivalent to the \fBquiet\fR command.
+
+.TP
+\fB\-t <format>\fR
+Specify the output time format. 
+Time format options are case insensitive and may be abbreviated.
+The default format is Minutes.
+Supported time format options are listed with the \fBtime\fP command below.
+
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
 Print detailed event logging. 
@@ -69,6 +77,36 @@ Print no warning or informational messages, only fatal error messages.
 Terminate the execution of sreport.
 Identical to the \fBexit\fR command.
 
+.TP
+\fBtime <time_format>\fP
+Specify the output time format. 
+Time format options are case insensitive and may be abbreviated.
+The default format is Minutes.
+Supported time format options include:
+.RS
+.TP 10
+\fBSecPer\fR
+Seconds/Percentage of Total
+.TP
+\fBMinPer\fR
+Minutes/Percentage of Total
+.TP
+\fBHourPer\fR
+Hours/Percentage of Total
+.TP
+\fBSeconds\fR
+Seconds
+.TP
+\fBMinutes\fR
+Minutes
+.TP
+\fBHours\fR
+Hours
+.TP
+\fBPercent\fR
+Percentage of Total
+.RE
+
 .TP
 \fBverbose\fP
 Print detailed event logging. 
@@ -78,6 +116,7 @@ This is an independent command with no options meant for use in interactive mode
 .TP
 \fBversion\fP
 Display the version number of sreport being executed.
 
 .TP
 \fB!!\fP
@@ -89,9 +128,48 @@ Repeat the last command executed.
 
 .TP
 Various reports are as follows...
-     cluster - Utilization
-     job     - Sizes
-     user    - TopUsage
+     cluster \- AccountUtilizationByUser, UserUtilizationByAccount, Utilization
+     job     \- Sizes
+     user    \- TopUsage
+
+.TP
+REPORT DESCRIPTION
+.RS
+.TP
+.B cluster AccountUtilizationByUser 
+This report will display account utilization as it appears on the
+hierarchical tree.  Starting with the specified account, or the
+root account by default, this report will list the underlying
+usage with a sum at each level.  Use the 'tree' option to display
+the tree for better visibility.
+.TP 
+.B cluster UserUtilizationByAccount
+This report will display users by account in order of utilization.
+A user's multiple accounts are not grouped into one entry, but are
+displayed on separate lines.
+.TP
+.B cluster Utilization
+This report will display total usage divided into Allocated, Down,
+Idle, and Reserved time for the selected clusters.  Reserved time
+refers to time that a job was waiting for resources after the job
+had become eligible.  If this value is not of importance to you,
+the number can be grouped with idle time.
+
+.TP
+.B job Sizes
+This report will display the amount of time used for the job ranges
+specified by the 'grouping=' option.  Only a single level in the tree
+is displayed, defaulting to the root of the tree.  If you specify other
+accounts with the 'account=' option you will receive those accounts'
+sub\-accounts.
+
+.TP
+.B user TopUsage
+Displays the top users on a cluster.  Use the group option to group
+accounts together.  The default is to have a separate line for each
+user/account combination.
 
 .TP
 Each report type has various options...
@@ -119,6 +197,10 @@ CLUSTER
 .TP
 .B Names=<OPT>
 List of clusters to include in report.  Default is local cluster.
+.TP
+.B Tree
+When used with the AccountUtilizationByUser report, the accounts are
+displayed as they are arranged in the hierarchy.
 .RE
 
 .TP
@@ -143,6 +225,10 @@ List of jobs/steps to include in report.  Default is all.
 .B Partitions=<OPT>
 List of partitions jobs ran on to include in report.  Default is all.
 .TP
+.B PrintJobCount
+When used with the Sizes report, prints the number of jobs run instead
+of the time used.
+.TP
 .B Users=<OPT>
 List of users jobs to include in report.  Default is all.
 .RE
@@ -191,4 +277,4 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBslurmdbd\fR(8)
+\fBsacct\fR(1), \fBslurmdbd\fR(8)
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 49fa02da3..f0bc69a07 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -98,6 +98,11 @@ The \fIlist\fR of constraints may include multiple features separated
 by ampersand (AND) and/or vertical bar (OR) operators.
 For example: \fB\-\-constraint="opteron&video"\fR or 
 \fB\-\-constraint="fast|faster"\fR.
+In the first example, only nodes having both the feature "opteron" AND
+the feature "video" will be used.
+There is no mechanism to specify that you want one node with feature
+"opteron" and another node with feature "video" in the case that no
+node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
 For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
@@ -726,6 +731,10 @@ in some failed state (non-zero exit code, node failure, timed out, etc).
 \fBafterok:job_id[:jobid...]\fR
 This job can begin execution after the specified jobs have successfully
 executed (ran to completion with a zero exit code).
+.TP
+\fBsingleton\fR
+This job can begin execution after any previously launched jobs sharing the same
+job name and user have terminated.
 .RE
 
 .TP
@@ -1211,7 +1220,9 @@ Same as \fB\-\-exclusive\fR
 Same as \fB\-g, \-\-geometry\fR=\fIX,Y,Z\fR
 .TP
 \fBSLURM_JOB_NAME\fR
-\fB\-J, \-\-job\-name\fR=\fIjobname\fR
+Same as \fB\-J, \-\-job\-name\fR=\fIjobname\fR except within an existing 
+allocation, in which case it is ignored to avoid using the batch job's name
+as the name of each job step. 
 .TP
 \fBSLURM_LABELIO\fR
 Same as \fB\-l, \-\-label\fR
@@ -1318,10 +1329,13 @@ on this node allocated to the job.
 \fBSLURM_GTIDS\fR
 Global task IDs running on this node.
 Zero origin and comma separated.
-
+.TP
+\fBSLURM_JOB_DEPENDENCY\fR
+Set to the value of the \-\-dependency option.
 .TP
 \fBSLURM_JOBID\fR
 Job id of the executing job
+
 .TP
 \fBSLURM_LAUNCH_NODE_IPADDR\fR
 IP address of the node from which the task launch was 
diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3
index e74f053d2..d157071c9 100644
--- a/doc/man/man3/slurm_allocate_resources.3
+++ b/doc/man/man3/slurm_allocate_resources.3
@@ -1,9 +1,11 @@
 .TH "Slurm API" "3" "April 2006" "Morris Jette" "Slurm job initiation functions"
 .SH "NAME"
-slurm_allocate_resources, 
-slurm_allocation_lookup, slurm_confirm_allocation, 
+slurm_allocate_resources, slurm_allocate_resources_blocking,
+slurm_allocation_msg_thr_create, slurm_allocation_msg_thr_destroy,
+slurm_allocation_lookup, slurm_allocation_lookup_lite,
+slurm_confirm_allocation, 
 slurm_free_submit_response_response_msg, slurm_init_job_desc_msg, 
-slurm_job_will_run, slurm_submit_batch_job
+slurm_job_will_run, slurm_read_hostfile, slurm_submit_batch_job
 \- Slurm job initiation functions
 .SH "SYNTAX"
 .LP 
@@ -17,6 +19,28 @@ int \fBslurm_allocate_resources\fR (
 .br 
 );
 .LP 
+resource_allocation_response_msg_t *\fBslurm_allocate_resources_blocking\fR (
+.br 
+	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
+.br 
+	time_t \fItimeout\fP, void \fI(*pending_callback)(uint32_t job_id)\fP
+.br 
+);
+.LP 
+allocation_msg_thread_t *\fBslurm_allocation_msg_thr_create\fR (
+.br 
+	uint16_t *\fIport\fP,
+.br 
+	slurm_allocation_callbacks_t *\fIcallbacks\fP
+.br 
+);
+.LP 
+void *\fBslurm_allocation_msg_thr_destroy\fR (
+.br 
+	allocation_msg_thread_t *\fIslurm_alloc_msg_thr_ptr\fP
+.br 
+);
+.LP 
 int \fBslurm_allocation_lookup\fR {
 .br
 	uint32_t \fIjobid\fP,
@@ -25,6 +49,14 @@ int \fBslurm_allocation_lookup\fR {
 .br
 );
 .LP 
+int \fBslurm_allocation_lookup_lite\fR {
+.br
+	uint32_t \fIjobid\fP,
+.br
+	resource_allocation_response_msg_t **\fIslurm_alloc_msg_pptr\fP
+.br
+);
+.LP 
 int \fBslurm_confirm_allocation\fR (
 .br 
 	old_job_alloc_msg_t *\fIold_job_desc_msg_ptr\fP,
@@ -57,6 +89,12 @@ int \fBslurm_job_will_run\fR (
 .br 
 );
 .LP
+char *\fBslurm_read_hostfile\fR (
+.br 
+	char *\fIfilename\fP, int \fIn\fP
+.br 
+);
+.LP
 int \fBslurm_submit_batch_job\fR (
 .br 
 	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
@@ -70,6 +108,10 @@ int \fBslurm_submit_batch_job\fR (
 \fIjob_desc_msg_ptr\fP
 Specifies the pointer to a job request specification. See slurm.h for full details 
 on the data structure's contents. 
+.TP
+\fIcallbacks\fP
+Specifies the pointer to an allocation callbacks structure.  See
+slurm.h for full details on the data structure's contents.
 .TP 
 \fIold_job_desc_msg_ptr\fP
 Specifies the pointer to a description of an existing job. See slurm.h for 
@@ -83,13 +125,21 @@ structure's contents.
 .TP 
 \fIslurm_alloc_msg_ptr\fP
 Specifies the pointer to the structure to be created and filled in by the function 
-\fIslurm_allocate_resources\fP, \fIslurm_allocation_lookup\fP, 
+\fIslurm_allocate_resources\fP,
+\fIslurm_allocate_resources_blocking\fP,
+\fIslurm_allocation_lookup\fP, \fIslurm_allocation_lookup_lite\fP, 
 \fIslurm_confirm_allocation\fP or \fIslurm_job_will_run\fP.
 .TP 
+\fIslurm_alloc_msg_thr_ptr\fP
+Specifies the pointer to the structure created and returned by the
+function \fIslurm_allocation_msg_thr_create\fP.  It must be destroyed
+with the function \fIslurm_allocation_msg_thr_destroy\fP.
+.TP 
 \fIslurm_submit_msg_pptr\fP
 Specifies the double pointer to the structure to be created and filled with a description 
-of the created job: job ID, etc. See slurm.h for full details on the data structure's contents. 
-.TP 
+of the created job: job ID, etc. See slurm.h for full details on the
+data structure's contents. 
+.TP
 \fIslurm_submit_msg_ptr\fP
 Specifies the pointer to the structure to be created and filled in by the function \fIslurm_submit_batch_job\fP.
 .SH "DESCRIPTION"
@@ -100,16 +150,43 @@ count or time allocation are outside of the partition's limits then a job
 entry will be created, a warning indication will be placed in the \fIerror_code\fP field of the response message, and the job will be left 
 queued until the partition's limits are changed.
 Always release the response message when no longer required using 
-the function \fBslurm_free_resource_allocation_response_msg\fR.
-.LP 
+the function \fBslurm_free_resource_allocation_response_msg\fR.  This
+function only makes the request once.  If the allocation is not
+available immediately, the node_cnt variable in the response will be 0.  If
+you want a function that will block until either an error is received
+or an allocation is granted, you can use the
+\fIslurm_allocate_resources_blocking\fP function described below.
+.LP
+\fBslurm_allocate_resources_blocking\fR Request a resource allocation for a
+job.  This call will block until the allocation is granted, an error
+occurs, or the specified timeout limit is reached.  The \fIpending_callback\fP
+parameter will be called if the allocation is not available
+immediately and the immediate flag is not set in the request.  This can
+be used to get the job id of the job while waiting for the allocation
+to become available.  On failure NULL is returned and errno is set.
+.LP
+\fBslurm_allocation_msg_thr_create\fR Start up a message handler
+thread that processes messages from the controller
+during an allocation. Callback functions are declared in the
+\fIcallbacks\fP parameter and will be called when a corresponding
+message is received from the controller.  This message thread is
+needed to receive messages from the controller about node failures in
+an allocation and other important messages.  Although technically not
+required, it can be very helpful for reporting problems with the
+allocation.
+.LP
+\fBslurm_allocation_msg_thr_destroy\fR Shut down the message handler
+thread that processes messages from the controller during
+an allocation.
+.LP
 \fBslurm_confirm_allocation\fR Return detailed information on a specific 
 existing job allocation. \fBOBSOLETE FUNCTION: Use slurm_allocation_lookup
 instead.\fR This function may only be successfully executed by the job's 
 owner or user root.
 .LP 
 \fBslurm_free_resource_allocation_response_msg\fR Release the storage generated in response 
-to a call of the function \fBslurm_allocate_resources\fR, or
-\fBslurm_allocation_lookup\fR.
+to a call of the function \fBslurm_allocate_resources\fR, 
+\fBslurm_allocation_lookup\fR, or \fBslurm_allocation_lookup_lite\fR.
 .LP 
 \fBslurm_free_submit_response_msg\fR Release the storage generated in response 
 to a call of the function \fBslurm_submit_batch_job\fR.
@@ -119,6 +196,13 @@ Execute this function before issuing a request to submit or modify a job.
 .LP 
 \fBslurm_job_will_run\fR Determine if the supplied job description could be executed immediately. 
 .LP 
+\fBslurm_read_hostfile\fR Read a SLURM hostfile specified by
+"filename".  "filename" must contain a list of SLURM NodeNames, one
+per line.  Reads up to "n" hostnames from the file.  Returns
+a hostlist ranged string of the contents of the file.
+This is a helper function; it does not contact any SLURM
+daemons.
+.LP 
 \fBslurm_submit_batch_job\fR Submit a job for later execution. Note that if 
 the job's requested node count or time allocation are outside of the partition's limits then a job entry will be created, a warning indication will be placed in the \fIerror_code\fP field of the response message, and the job will be left queued until the partition's limits are changed and resources are available.  Always release the response message when no 
 longer required using the function \fBslurm_free_submit_response_msg\fR.
@@ -186,10 +270,14 @@ the partition's time limit.
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
 SLURM controller.
-.SH "EXAMPLE"
+.SH "NON-BLOCKING EXAMPLE"
 .LP 
 #include <stdio.h>
 .br
+#include <stdlib.h>
+.br
+#include <signal.h>
+.br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
@@ -202,16 +290,20 @@ int main (int argc, char *argv[])
 .br 
 	resource_allocation_response_msg_t* slurm_alloc_msg_ptr ;
 .LP 
-	slurm_init_job_desc_msg( &job_mesg );
+	slurm_init_job_desc_msg( &job_desc_msg );
 .br 
-	job_mesg. name = ("job01\0");
+	job_desc_msg. name = ("job01\0");
 .br 
-	job_mesg. min_memory = 1024;
+	job_desc_msg. job_min_memory = 1024;
 .br 
-	job_mesg. time_limit = 200;
+	job_desc_msg. time_limit = 200;
 .br 
-	job_mesg. num_nodes = 400;
+	job_desc_msg. min_nodes = 400;
 .br 
+	job_desc_msg. user_id = getuid();
+.br
+	job_desc_msg. group_id = getgid();
+.br
 	if (slurm_allocate_resources(&job_desc_msg,
 .br
 	                             &slurm_alloc_msg_ptr)) {
@@ -228,9 +320,77 @@ int main (int argc, char *argv[])
 .br
 	        slurm_alloc_msg_ptr\->job_id );
 .br 
-	if (slurm_job_kill(slurm_alloc_msg_ptr\->
+	if (slurm_kill_job(slurm_alloc_msg_ptr\->job_id, SIGKILL, 0)) {
+.br 
+		printf ("kill errno %d\\n", slurm_get_errno());
+.br 
+		exit (1);
+.br 
+	}
+.br
+	printf ("canceled job_id %u\\n", 
+.br
+	        slurm_alloc_msg_ptr\->job_id );
+.br 
+	slurm_free_resource_allocation_response_msg(
+.br
+			slurm_alloc_msg_ptr);
+.br 
+	exit (0);
+.br 
+}
+
+.SH "BLOCKING EXAMPLE"
+.LP 
+#include <stdio.h>
+.br
+#include <stdlib.h>
 .br
-	                     job_id, SIGKILL)) {
+#include <signal.h>
+.br
+#include <slurm/slurm.h>
+.br
+#include <slurm/slurm_errno.h>
+.LP 
+int main (int argc, char *argv[])
+.br 
+{
+.br 
+	job_desc_msg_t job_desc_msg;
+.br 
+	resource_allocation_response_msg_t* slurm_alloc_msg_ptr ;
+.LP 
+	slurm_init_job_desc_msg( &job_desc_msg );
+.br 
+	job_desc_msg. name = ("job01\0");
+.br 
+	job_desc_msg. job_min_memory = 1024;
+.br 
+	job_desc_msg. time_limit = 200;
+.br 
+	job_desc_msg. min_nodes = 400;
+.br 
+	job_desc_msg. user_id = getuid();
+.br
+	job_desc_msg. group_id = getgid();
+.br
+	if (!(slurm_alloc_msg_ptr = 
+.br
+	      slurm_allocate_resources_blocking(&job_desc_msg, 0, NULL))) {
+.br
+		slurm_perror ("slurm_allocate_resources_blocking error");
+.br
+		exit (1);
+.br
+	}
+.br 
+	printf ("Allocated nodes %s to job_id %u\\n", 
+.br 
+	        slurm_alloc_msg_ptr\->node_list, 
+.br
+	        slurm_alloc_msg_ptr\->job_id );
+.br 
+	if (slurm_kill_job(slurm_alloc_msg_ptr\->job_id, SIGKILL, 0)) {
 .br 
 		printf ("kill errno %d\\n", slurm_get_errno());
 .br 
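
Neither example above uses the new message handler functions. The following
sketch shows how slurm_allocation_msg_thr_create and
slurm_allocation_msg_thr_destroy might be combined with a blocking
allocation; it relies only on the signatures shown in the SYNTAX section, and
the zero-filled callbacks structure is a placeholder for real callback
functions (see slurm.h for the slurm_allocation_callbacks_t members):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

int main(void)
{
	job_desc_msg_t job_desc_msg;
	resource_allocation_response_msg_t *alloc_ptr;
	slurm_allocation_callbacks_t callbacks;
	allocation_msg_thread_t *msg_thr;
	uint16_t port = 0;

	/* All callbacks left NULL for brevity; a real program would fill
	 * in the members it cares about (node failure, job complete, ...). */
	memset(&callbacks, 0, sizeof(callbacks));
	msg_thr = slurm_allocation_msg_thr_create(&port, &callbacks);

	slurm_init_job_desc_msg(&job_desc_msg);
	job_desc_msg.name      = "job01";
	job_desc_msg.min_nodes = 1;
	job_desc_msg.user_id   = getuid();
	job_desc_msg.group_id  = getgid();

	if (!(alloc_ptr = slurm_allocate_resources_blocking(&job_desc_msg,
							    0, NULL))) {
		slurm_perror("slurm_allocate_resources_blocking error");
		slurm_allocation_msg_thr_destroy(msg_thr);
		exit(1);
	}
	printf("Allocated nodes %s to job_id %u\n",
	       alloc_ptr->node_list, alloc_ptr->job_id);

	slurm_free_resource_allocation_response_msg(alloc_ptr);
	slurm_allocation_msg_thr_destroy(msg_thr);
	return 0;
}
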
diff --git a/doc/man/man3/slurm_allocate_resources_blocking.3 b/doc/man/man3/slurm_allocate_resources_blocking.3
new file mode 100644
index 000000000..6534eeb96
--- /dev/null
+++ b/doc/man/man3/slurm_allocate_resources_blocking.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_allocation_lookup_lite.3 b/doc/man/man3/slurm_allocation_lookup_lite.3
new file mode 100644
index 000000000..6534eeb96
--- /dev/null
+++ b/doc/man/man3/slurm_allocation_lookup_lite.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_allocation_msg_thr_create.3 b/doc/man/man3/slurm_allocation_msg_thr_create.3
new file mode 100644
index 000000000..6534eeb96
--- /dev/null
+++ b/doc/man/man3/slurm_allocation_msg_thr_create.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_allocation_msg_thr_destroy.3 b/doc/man/man3/slurm_allocation_msg_thr_destroy.3
new file mode 100644
index 000000000..6534eeb96
--- /dev/null
+++ b/doc/man/man3/slurm_allocation_msg_thr_destroy.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_checkpoint_task_complete.3 b/doc/man/man3/slurm_checkpoint_task_complete.3
new file mode 100644
index 000000000..32120a6bb
--- /dev/null
+++ b/doc/man/man3/slurm_checkpoint_task_complete.3
@@ -0,0 +1 @@
+.so man3/slurm_checkpoint_error.3
diff --git a/doc/man/man3/slurm_clear_trigger.3 b/doc/man/man3/slurm_clear_trigger.3
new file mode 100644
index 000000000..2fd720318
--- /dev/null
+++ b/doc/man/man3/slurm_clear_trigger.3
@@ -0,0 +1 @@
+.so man3/slurm_trigger.3
diff --git a/doc/man/man3/slurm_free_job_alloc_info_response_msg.3 b/doc/man/man3/slurm_free_job_alloc_info_response_msg.3
new file mode 100644
index 000000000..836ffa79b
--- /dev/null
+++ b/doc/man/man3/slurm_free_job_alloc_info_response_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_info_msg.3
diff --git a/doc/man/man3/slurm_free_job_info_msg.3 b/doc/man/man3/slurm_free_job_info_msg.3
index c31031800..ce1b8dabd 100644
--- a/doc/man/man3/slurm_free_job_info_msg.3
+++ b/doc/man/man3/slurm_free_job_info_msg.3
@@ -1,7 +1,7 @@
 .TH "Slurm API" "3" "September 2006" "Morris Jette" "Slurm job information reporting functions"
 .SH "NAME"
-slurm_free_job_info_msg, slurm_get_end_time,
-slurm_get_rem_time, slurm_get_select_jobinfo,
+slurm_free_job_alloc_info_response_msg, slurm_free_job_info_msg, 
+slurm_get_end_time, slurm_get_rem_time, slurm_get_select_jobinfo,
 slurm_load_jobs, slurm_pid2jobid, 
 slurm_print_job_info, slurm_print_job_info_msg
 \- Slurm job information reporting functions
@@ -19,6 +19,12 @@ ISLURM_GET_REM_TIME, ISLURM_GET_REM_TIME2
 .br
 #include <sys/types.h>
 .LP
+void \fBslurm_free_job_alloc_info_response_msg\fR (
+.br 
+	job_alloc_info_response_msg_t *\fIjob_alloc_info_msg_ptr\fP
+.br 
+);
+.LP 
 void \fBslurm_free_job_info_msg\fR (
 .br 
 	job_info_msg_t *\fIjob_info_msg_ptr\fP
@@ -180,6 +186,9 @@ greater than the last time changes where made to that information, new
 information is not returned.  Otherwise all the configuration. job, node, 
 or partition records are returned.
 .SH "DESCRIPTION"
+.LP
+\fBslurm_free_job_alloc_info_response_msg\fR Free the job allocation
+information response message.
 .LP 
 \fBslurm_free_job_info_msg\fR Release the storage generated by the 
 \fBslurm_load_jobs\fR function.
diff --git a/doc/man/man3/slurm_free_node_info_msg.3 b/doc/man/man3/slurm_free_node_info_msg.3
new file mode 100644
index 000000000..63979eec2
--- /dev/null
+++ b/doc/man/man3/slurm_free_node_info_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_free_node_info.3
diff --git a/doc/man/man3/slurm_free_partition_info_msg.3 b/doc/man/man3/slurm_free_partition_info_msg.3
new file mode 100644
index 000000000..0e99ece36
--- /dev/null
+++ b/doc/man/man3/slurm_free_partition_info_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_free_partition_info.3
diff --git a/doc/man/man3/slurm_free_slurmd_status.3 b/doc/man/man3/slurm_free_slurmd_status.3
new file mode 100644
index 000000000..d7153f138
--- /dev/null
+++ b/doc/man/man3/slurm_free_slurmd_status.3
@@ -0,0 +1 @@
+.so man3/slurm_slurmd_status.3
diff --git a/doc/man/man3/slurm_free_trigger_msg.3 b/doc/man/man3/slurm_free_trigger_msg.3
new file mode 100644
index 000000000..2fd720318
--- /dev/null
+++ b/doc/man/man3/slurm_free_trigger_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_trigger.3
diff --git a/doc/man/man3/slurm_get_checkpoint_file_path.3 b/doc/man/man3/slurm_get_checkpoint_file_path.3
new file mode 100644
index 000000000..32120a6bb
--- /dev/null
+++ b/doc/man/man3/slurm_get_checkpoint_file_path.3
@@ -0,0 +1 @@
+.so man3/slurm_checkpoint_error.3
diff --git a/doc/man/man3/slurm_get_triggers.3 b/doc/man/man3/slurm_get_triggers.3
new file mode 100644
index 000000000..2fd720318
--- /dev/null
+++ b/doc/man/man3/slurm_get_triggers.3
@@ -0,0 +1 @@
+.so man3/slurm_trigger.3
diff --git a/doc/man/man3/slurm_job_step_layout_free.3 b/doc/man/man3/slurm_job_step_layout_free.3
new file mode 100644
index 000000000..f600ff5c1
--- /dev/null
+++ b/doc/man/man3/slurm_job_step_layout_free.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_step_info_response_msg.3
diff --git a/doc/man/man3/slurm_job_step_layout_get.3 b/doc/man/man3/slurm_job_step_layout_get.3
new file mode 100644
index 000000000..f600ff5c1
--- /dev/null
+++ b/doc/man/man3/slurm_job_step_layout_get.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_step_info_response_msg.3
diff --git a/doc/man/man3/slurm_kill_job.3 b/doc/man/man3/slurm_kill_job.3
index 0f6685bcf..e07d6f7db 100644
--- a/doc/man/man3/slurm_kill_job.3
+++ b/doc/man/man3/slurm_kill_job.3
@@ -1,6 +1,8 @@
 .TH "Slurm API" "3" "November 2003" "Morris Jette" "Slurm job signal calls"
 .SH "NAME"
-slurm_kill_job, slurm_kill_job_step \- Slurm job signal calls
+slurm_kill_job, slurm_kill_job_step,
+slurm_signal_job, slurm_signal_job_step,
+slurm_terminate_job, slurm_terminate_job_step \- Slurm job signal calls
 .SH "SYNTAX"
 .LP 
 #include <slurm/slurm.h>
@@ -24,6 +26,38 @@ int \fBslurm_kill_job_step\fR (
 	uint16_t \fIsignal\fP
 .br 
 );
+.LP
+int \fBslurm_signal_job\fR (
+.br 
+	uint32_t \fIjob_id\fP,
+.br 
+	uint16_t \fIsignal\fP
+.br 
+);
+.LP
+int \fBslurm_signal_job_step\fR (
+.br 
+	uint32_t \fIjob_id\fP,
+.br 
+	uint32_t \fIjob_step_id\fP,
+.br 
+	uint16_t \fIsignal\fP
+.br 
+);
+.LP
+int \fBslurm_terminate_job\fR (
+.br 
+	uint32_t \fIjob_id\fP
+.br 
+);
+.LP
+int \fBslurm_terminate_job_step\fR (
+.br 
+	uint32_t \fIjob_id\fP,
+.br 
+	uint32_t \fIjob_step_id\fP
+.br 
+);
 .SH "ARGUMENTS"
 .LP 
 \fIbatch_flag\fP
@@ -46,6 +80,18 @@ This function may only be successfully executed by the job's owner or user root.
 .LP 
 \fBslurm_kill_job_step\fR Request that a signal be sent to a specific job step. 
 This function may only be successfully executed by the job's owner or user root.
+.LP
+\fBslurm_signal_job\fR Request that the specified signal be sent to all
+steps of an existing job.
+.LP
+\fBslurm_signal_job_step\fR Request that the specified signal be sent to
+an existing job step.
+.LP
+\fBslurm_terminate_job\fR Request that all steps of an existing job be
+terminated.  This sends a REQUEST_TERMINATE_JOB rpc to all slurmd in
+the job allocation, and then calls slurm_complete_job().
+.LP
+\fBslurm_terminate_job_step\fR Request that a job step be terminated.  This
+sends a REQUEST_TERMINATE_TASKS rpc to all slurmd of a job step, and
+then calls slurm_complete_job_step() after verifying that all nodes in
+the job step no longer have running tasks from the job step.  (May
+take over 35 seconds to return.)
 .SH "RETURN VALUE"
 .LP
 On success, zero is returned. On error, \-1 is returned, and Slurm error code is set appropriately.
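
As an illustration of the calls added above, the fragment below signals every
step of a job and then terminates the whole job. It is only a sketch using
the signatures from the SYNTAX section, with the job id assumed to come from
the caller:

#include <stdio.h>
#include <signal.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

/* Send SIGUSR1 to every step of the job, then terminate the whole job. */
static int signal_then_terminate(uint32_t job_id)
{
	if (slurm_signal_job(job_id, SIGUSR1)) {
		slurm_perror("slurm_signal_job");
		return -1;
	}
	if (slurm_terminate_job(job_id)) {
		slurm_perror("slurm_terminate_job");
		return -1;
	}
	printf("job %u signalled and terminated\n", job_id);
	return 0;
}
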
diff --git a/doc/man/man3/slurm_load_slurmd_status.3 b/doc/man/man3/slurm_load_slurmd_status.3
new file mode 100644
index 000000000..d7153f138
--- /dev/null
+++ b/doc/man/man3/slurm_load_slurmd_status.3
@@ -0,0 +1 @@
+.so man3/slurm_slurmd_status.3
diff --git a/doc/man/man3/slurm_notify_job.3 b/doc/man/man3/slurm_notify_job.3
new file mode 100644
index 000000000..836ffa79b
--- /dev/null
+++ b/doc/man/man3/slurm_notify_job.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_info_msg.3
diff --git a/doc/man/man3/slurm_ping.3 b/doc/man/man3/slurm_ping.3
new file mode 100644
index 000000000..8c2ed9814
--- /dev/null
+++ b/doc/man/man3/slurm_ping.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_print_slurmd_status.3 b/doc/man/man3/slurm_print_slurmd_status.3
new file mode 100644
index 000000000..d7153f138
--- /dev/null
+++ b/doc/man/man3/slurm_print_slurmd_status.3
@@ -0,0 +1 @@
+.so man3/slurm_slurmd_status.3
diff --git a/doc/man/man3/slurm_read_hostfile.3 b/doc/man/man3/slurm_read_hostfile.3
new file mode 100644
index 000000000..6534eeb96
--- /dev/null
+++ b/doc/man/man3/slurm_read_hostfile.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man3/slurm_set_debug_level.3 b/doc/man/man3/slurm_set_debug_level.3
new file mode 100644
index 000000000..8c2ed9814
--- /dev/null
+++ b/doc/man/man3/slurm_set_debug_level.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_set_trigger.3 b/doc/man/man3/slurm_set_trigger.3
new file mode 100644
index 000000000..2fd720318
--- /dev/null
+++ b/doc/man/man3/slurm_set_trigger.3
@@ -0,0 +1 @@
+.so man3/slurm_trigger.3
diff --git a/doc/man/man3/slurm_signal_job.3 b/doc/man/man3/slurm_signal_job.3
new file mode 100644
index 000000000..90c80c4ee
--- /dev/null
+++ b/doc/man/man3/slurm_signal_job.3
@@ -0,0 +1 @@
+.so man3/slurm_kill_job.3
diff --git a/doc/man/man3/slurm_signal_job_step.3 b/doc/man/man3/slurm_signal_job_step.3
new file mode 100644
index 000000000..90c80c4ee
--- /dev/null
+++ b/doc/man/man3/slurm_signal_job_step.3
@@ -0,0 +1 @@
+.so man3/slurm_kill_job.3
diff --git a/doc/man/man3/slurm_slurmd_status.3 b/doc/man/man3/slurm_slurmd_status.3
new file mode 100644
index 000000000..87c4badc1
--- /dev/null
+++ b/doc/man/man3/slurm_slurmd_status.3
@@ -0,0 +1,67 @@
+.TH "Slurm API" "3" "Oct 2008" "Danny Auble" "Slurmd status functions"
+
+.SH "NAME"
+
+slurm_free_slurmd_status, slurm_load_slurmd_status, slurm_print_slurmd_status
+
+.SH "SYNTAX"
+.LP 
+#include <slurm/slurm.h>
+.LP
+.LP
+void \fBslurm_free_slurmd_status\fR (
+.br
+	slurmd_status_t* \fIslurmd_status_ptr\fP 
+.br
+);
+.LP
+int \fBslurm_load_slurmd_status\fR (
+.br
+	slurmd_status_t** \fIslurmd_status_ptr\fP 
+.br
+);
+.LP
+void \fBslurm_print_slurmd_status\fR (
+.br
+	FILE *\fIout\fP,
+.br
+	slurmd_status_t* \fIslurmd_status_ptr\fP 
+.br
+);
+
+.SH "ARGUMENTS"
+.LP 
+.TP
+\fIslurmd_status_ptr\fP 
+Slurmd status pointer.  Created by \fBslurm_load_slurmd_status\fR,
+used in subsequent function calls, and destroyed by
+\fBslurm_free_slurmd_status\fR.
+
+.SH "DESCRIPTION"
+.LP
+\fBslurm_free_slurmd_status\fR Free the slurmd state information.
+.LP 
+\fBslurm_load_slurmd_status\fR Issue an RPC to get the status of the slurmd
+daemon on this machine.
+.LP
+\fBslurm_print_slurmd_status\fR Output the contents of the slurmd status
+message as loaded using \fBslurm_load_slurmd_status\fR.
+
+.SH "COPYING"
+Copyright (C) 2006-2007 The Regents of the University of California.
+Copyright (C) 2008 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+LLNL\-CODE\-402394.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
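
A minimal usage sketch built only from the three functions documented in the
new man page above:

#include <stdio.h>
#include <stdlib.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

int main(void)
{
	slurmd_status_t *status = NULL;

	/* Ask the local slurmd for its status, print it, then free it. */
	if (slurm_load_slurmd_status(&status)) {
		slurm_perror("slurm_load_slurmd_status");
		exit(1);
	}
	slurm_print_slurmd_status(stdout, status);
	slurm_free_slurmd_status(status);
	return 0;
}
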
diff --git a/doc/man/man3/slurm_sprint_job_info.3 b/doc/man/man3/slurm_sprint_job_info.3
new file mode 100644
index 000000000..836ffa79b
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_job_info.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_info_msg.3
diff --git a/doc/man/man3/slurm_sprint_job_step_info.3 b/doc/man/man3/slurm_sprint_job_step_info.3
new file mode 100644
index 000000000..f600ff5c1
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_job_step_info.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_step_info_response_msg.3
diff --git a/doc/man/man3/slurm_sprint_node_table.3 b/doc/man/man3/slurm_sprint_node_table.3
new file mode 100644
index 000000000..63979eec2
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_node_table.3
@@ -0,0 +1 @@
+.so man3/slurm_free_node_info.3
diff --git a/doc/man/man3/slurm_sprint_partition_info.3 b/doc/man/man3/slurm_sprint_partition_info.3
new file mode 100644
index 000000000..0e99ece36
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_partition_info.3
@@ -0,0 +1 @@
+.so man3/slurm_free_partition_info.3
diff --git a/doc/man/man3/slurm_step_ctx_create.3 b/doc/man/man3/slurm_step_ctx_create.3
index c949a221f..54a96c299 100644
--- a/doc/man/man3/slurm_step_ctx_create.3
+++ b/doc/man/man3/slurm_step_ctx_create.3
@@ -1,7 +1,9 @@
 .TH "Slurm API" "3" "March 2007" "Morris Jette" "Slurm job step context functions"
 
 .SH "NAME"
-slurm_step_ctx_create, slurm_step_ctx_get, slurm_jobinfo_ctx_get,
+slurm_step_ctx_create, slurm_step_ctx_create_no_alloc,
+slurm_step_ctx_daemon_per_node_hack, slurm_step_ctx_get,
+slurm_step_ctx_params_t_init, slurm_jobinfo_ctx_get,
 slurm_spawn_kill, slurm_step_ctx_destroy \- Slurm task spawn functions
 
 .SH "SYNTAX"
@@ -11,13 +13,25 @@ slurm_spawn_kill, slurm_step_ctx_destroy \- Slurm task spawn functions
 .LP
 slurm_step_ctx \fBslurm_step_ctx_create\fR (
 .br
-	job_step_create_request_msg_t *\fIstep_req\fP 
+	slurm_step_ctx_params_t *\fIstep_req\fP 
+.br
+);
+.LP
+slurm_step_ctx \fBslurm_step_ctx_create_no_alloc\fR (
+.br
+	slurm_step_ctx_params_t *\fIstep_req\fP 
+.br
+);
+.LP
+int \fBslurm_step_ctx_daemon_per_node_hack\fR (
+.br
+	slurm_step_ctx_t *\fIctx\fP
 .br
 );
 .LP
 int \fBslurm_step_ctx_get\fR (
 .br
-	slurm_step_ctx \fIctx\fP,
+	slurm_step_ctx_t *\fIctx\fP,
 .br
 	int \fIctx_key\fP,
 .br
@@ -35,6 +49,12 @@ int \fBslurm_jobinfo_ctx_get\fR (
 .br
 );
 .LP
+void \fBslurm_step_ctx_params_t_init\fR (
+.br
+	slurm_step_ctx_params_t *\fIstep_req\fP 
+.br
+);
+.LP
 int \fBslurm_spawn\fR {
 .br
 	slurm_step_ctx \fIctx\fP,
@@ -64,7 +84,8 @@ Specifies the pointer to the structure with job step request specification. See
 slurm.h for full details on the data structure's contents.
 .TP
 \fIctx\fP
-Job step context. Created by \fBslurm_step_ctx_create\fR, used in subsequent
+Job step context. Created by \fBslurm_step_ctx_create\fR or
+\fBslurm_step_ctx_create_no_alloc\fR, used in subsequent
 function calls, and destroyed by \fBslurm_step_ctx_destroy\fR.
 .TP
 \fIctx_key\fP
@@ -99,11 +120,22 @@ finished. NOTE: this function creates a slurm job step. Call \fBslurm_spawn\fR
 in a timely fashion to avoid having job step credentials time out. If
 \fBslurm_spawn\fR is not used, explicitly cancel the job step. 
 .LP
+\fBslurm_step_ctx_create_no_alloc\fR Same as above, only no
+allocation is made. To avoid memory leaks call
+\fBslurm_step_ctx_destroy\fR when the use of this context is finished.
+.LP
+\fBslurm_step_ctx_daemon_per_node_hack\fR Hack the step context to run
+a single process per node, regardless of the settings selected at
+slurm_step_ctx_create time. 
+.LP
 \fBslurm_step_ctx_get\fR Get values from a job step context.
 \fIctx_key\fP identifies the fields to be gathered from the job step context.
 Subsequent arguments to this function are dependent upon the value
 of \fIctx_key\fP. See the \fBCONTEXT KEYS\fR section for details.
 .LP
+\fBslurm_step_ctx_params_t_init\fR Initialize the parameters in the
+structure that you will pass to slurm_step_ctx_create().
+.LP
 \fBslurm_spawn\fR Spawn tasks based upon a job step context
 and establish communications with the tasks using the socket 
 file descriptors specified.
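
The intended call order for the new functions is roughly: initialize the
params structure, create the context, use it, then destroy it. A bare sketch
follows; no params members are set here (the real members are defined in
slurm.h), and the opaque slurm_step_ctx handle type is taken from the SYNTAX
section above:

#include <stdio.h>
#include <stdlib.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

int main(void)
{
	slurm_step_ctx_params_t step_params;
	slurm_step_ctx ctx;

	/* Fill the params structure with default values first, then
	 * override whichever members the caller needs (members are
	 * defined in slurm.h and not shown here). */
	slurm_step_ctx_params_t_init(&step_params);

	ctx = slurm_step_ctx_create(&step_params);
	if (ctx == NULL) {
		slurm_perror("slurm_step_ctx_create");
		exit(1);
	}

	/* ... spawn tasks, query values with slurm_step_ctx_get(), ... */

	slurm_step_ctx_destroy(ctx);
	return 0;
}
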
diff --git a/doc/man/man3/slurm_step_ctx_create_no_alloc.3 b/doc/man/man3/slurm_step_ctx_create_no_alloc.3
new file mode 100644
index 000000000..6c5422fcb
--- /dev/null
+++ b/doc/man/man3/slurm_step_ctx_create_no_alloc.3
@@ -0,0 +1 @@
+.so man3/slurm_step_ctx_create.3
diff --git a/doc/man/man3/slurm_step_ctx_daemon_per_node_hack.3 b/doc/man/man3/slurm_step_ctx_daemon_per_node_hack.3
new file mode 100644
index 000000000..6c5422fcb
--- /dev/null
+++ b/doc/man/man3/slurm_step_ctx_daemon_per_node_hack.3
@@ -0,0 +1 @@
+.so man3/slurm_step_ctx_create.3
diff --git a/doc/man/man3/slurm_step_ctx_params_t_init.3 b/doc/man/man3/slurm_step_ctx_params_t_init.3
new file mode 100644
index 000000000..6c5422fcb
--- /dev/null
+++ b/doc/man/man3/slurm_step_ctx_params_t_init.3
@@ -0,0 +1 @@
+.so man3/slurm_step_ctx_create.3
diff --git a/doc/man/man3/slurm_step_launch.3 b/doc/man/man3/slurm_step_launch.3
index 5434d9f81..6e6c10c20 100644
--- a/doc/man/man3/slurm_step_launch.3
+++ b/doc/man/man3/slurm_step_launch.3
@@ -2,7 +2,8 @@
 
 .SH "NAME"
 
-slurm_step_launch_params_t_init, slurm_step_launch, slurm_step_launch_wait_start,
+slurm_step_launch_params_t_init, slurm_step_launch,
+slurm_step_launch_fwd_signal, slurm_step_launch_wait_start,
 slurm_step_launch_wait_finish, slurm_step_launch_abort \- Slurm job step launch functions
 
 .SH "SYNTAX"
@@ -28,6 +29,14 @@ int \fBslurm_step_launch\fR (
 .br
 );
 .LP
+void \fBslurm_step_launch_fwd_signal\fR (
+.br
+	slurm_step_ctx \fIctx\fP,
+.br
+	int \fIsigno\fP
+.br
+);
+.LP
 int \fBslurm_step_launch_wait_start\fR (
 .br
 	slurm_step_ctx \fIctx\fP
@@ -74,6 +83,9 @@ default values.  This function will NOT allocate any new memory.
 .LP
 \fBslurm_step_launch\fR Launch a parallel job step.
 .LP
+\fBslurm_step_launch_fwd_signal\fR Forward a signal to all those nodes
+with running tasks.
+.LP
 \fBslurm_step_launch_wait_start\fR Block until all tasks have started.
 .LP
 \fBslurm_step_launch_wait_finish\fR Block until all tasks have finished 
diff --git a/doc/man/man3/slurm_step_launch_fwd_signal.3 b/doc/man/man3/slurm_step_launch_fwd_signal.3
new file mode 100644
index 000000000..b54973d67
--- /dev/null
+++ b/doc/man/man3/slurm_step_launch_fwd_signal.3
@@ -0,0 +1 @@
+.so man3/slurm_step_launch.3
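
An illustrative fragment for the new signal forwarding call documented above,
assuming a step context obtained as described in slurm_step_ctx_create(3)
(context creation is not shown):

#include <signal.h>
#include <slurm/slurm.h>

/* Forward SIGINT to every node that still has running tasks in the
 * step described by ctx. */
static void forward_interrupt(slurm_step_ctx ctx)
{
	slurm_step_launch_fwd_signal(ctx, SIGINT);
}
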
diff --git a/doc/man/man3/slurm_terminate_job.3 b/doc/man/man3/slurm_terminate_job.3
new file mode 100644
index 000000000..90c80c4ee
--- /dev/null
+++ b/doc/man/man3/slurm_terminate_job.3
@@ -0,0 +1 @@
+.so man3/slurm_kill_job.3
diff --git a/doc/man/man3/slurm_terminate_job_step.3 b/doc/man/man3/slurm_terminate_job_step.3
new file mode 100644
index 000000000..90c80c4ee
--- /dev/null
+++ b/doc/man/man3/slurm_terminate_job_step.3
@@ -0,0 +1 @@
+.so man3/slurm_kill_job.3
diff --git a/doc/man/man3/slurm_trigger.3 b/doc/man/man3/slurm_trigger.3
new file mode 100644
index 000000000..e69de29bb
diff --git a/doc/man/man5/bluegene.conf.5 b/doc/man/man5/bluegene.conf.5
index 29bcb9520..4ecc5ca5d 100644
--- a/doc/man/man5/bluegene.conf.5
+++ b/doc/man/man5/bluegene.conf.5
@@ -5,6 +5,9 @@ bluegene.conf \- Slurm configuration file for BlueGene systems
 \fB/etc/bluegene.conf\fP is an ASCII file which describes BlueGene specific 
 SLURM configuration information. This includes specifications for bgblock 
 layout, configuration, logging, etc.
+The file location can be modified at system build time using the
+DEFAULT_SLURM_CONF parameter. The file will always be located in the 
+same directory as the \fBslurm.conf\fP file. 
 .LP
 Paramter names are case insensitive.
 Any text following a "#" in the configuration file is treated 
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index 41806f1b7..b5afb15d8 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -1,4 +1,4 @@
-.TH "slurm.conf" "5" "July 2008" "slurm.conf 1.3" "Slurm configuration file"
+.TH "slurm.conf" "5" "September 2008" "slurm.conf 1.3" "Slurm configuration file"
 
 .SH "NAME"
 slurm.conf \- Slurm configuration file 
@@ -40,7 +40,9 @@ The overall configuration parameters available include:
 \fBAccountingStorageEnforce\fR
 If set to a non-zero value and the user, partition, account association is not 
 defined for a job in the accounting database then prevent the job from being 
-executed.
+executed. This needs to be set to '2' if the association limits are also to 
+be enforced.  If set to anything else, association limits will not be 
+enforced.
 The default value is zero.
 
 .TP
@@ -97,7 +99,10 @@ The default value is "accounting_storage/none", which means that
 account records are not maintained. 
 The value "accounting_storage/pgsql" indicates that accounting records
 should be written to a PostgreSQL database specified by the 
-\fBAccountingStorageLoc\fR parameter.
+\fBAccountingStorageLoc\fR parameter.  This plugin is not complete and 
+should not be used if you want to use associations.  It will, however, work with
+basic accounting of jobs and job steps.  If you are interested in 
+completing it, please email slurm-dev@lists.llnl.gov.
 The value "accounting_storage/slurmdbd" indicates that accounting records
 will be written to SlurmDDB, which manages an underlying MySQL or 
 PostgreSQL database. See "man slurmdbd" for more information.
@@ -600,17 +605,37 @@ on SPANK plugins, see the \fBspank\fR(8) manual.
 \fBPrivateData\fR
 This controls what type of information is hidden from regular users.
 By default, all information is visible to all users.
-User \fBSlurmUser\fR and root can always view all information.
+User \fBSlurmUser\fR and \fBroot\fR can always view all information.
 Multiple values may be specified with a comma separator.
 Acceptable values include:
 .RS
 .TP
-\fBjobs\fR prevents users from viewing jobs or job steps belonging
-to other users.
+\fBaccounts\fR 
+(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing any account 
+definitions unless they are coordinators of them.
+.TP
+\fBjobs\fR
+prevents users from viewing jobs or job steps belonging
+to other users. (NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
+job records belonging to other users unless they are coordinators of 
+the association running the job when using sacct.
+.TP
+\fBnodes\fR
+prevents users from viewing node state information.
+.TP
+\fBpartitions\fR 
+prevents users from viewing partition state information.
 .TP
-\fBnodes\fR prevents users from viewing node state information.
+\fBusers\fR 
+(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
+information about any user other than themselves; this also means users can 
+only see the associations they are involved with.  
+Coordinators can see the associations of all users they coordinate, 
+but can only see themselves when listing users.
 .TP
-\fBpartitions\fR prevents users from viewing partition state information.
+\fBusage\fR 
+(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
+usage of any other user.  This applies to sreport.
 .RE
 
 
@@ -768,14 +793,49 @@ Related configuration options include \fBResumeProgram\fR, \fBSuspendRate\fR,
 
 .TP
 \fBReturnToService\fR
-If set to 1, then a non\-responding (DOWN) node will become available 
-for use upon registration. Note that DOWN node's state will be changed 
-only if it was set DOWN due to being non\-responsive. If the node was 
-set DOWN for any other reason (low memory, prolog failure, epilog 
-failure, etc.), its state will not automatically be changed.  The 
-default value is 0, which means that a node will remain in the 
-DOWN state until a system administrator explicitly changes its state
-(even if the slurmd daemon registers and resumes communications).
+Controls when a DOWN node will be returned to service. 
+The default value is 0.
+Supported values include
+.RS
+.TP 4
+\fB0\fR
+A node will remain in the DOWN state until a system administrator
+explicitly changes its state (even if the slurmd daemon registers
+and resumes communications).
+.TP
+\fB1\fR
+A non\-responding (DOWN) node will become available for use upon
+registration. Note that DOWN node's state will be changed only if
+it was set DOWN due to being non\-responsive. If the node was
+set DOWN for any other reason (low memory, prolog failure, epilog
+failure, etc.), its state will not automatically be changed.  
+.TP
+\fB2\fR
+A DOWN node will become available for use upon registration with a
+valid configuration.  The node could have been set DOWN for any reason. 
+.RE
+
+.TP
+\fBSallocDefaultCommand\fR
+Normally, \fBsalloc\fR(1) will run the user's default shell when
+a command to execute is not specified on the \fBsalloc\fR command line.
+If \fBSallocDefaultCommand\fR is specified, \fBsalloc\fR will instead
+run the configured command. The command is passed to '/bin/sh \-c', so
+shell metacharacters are allowed, and commands with multiple arguments
+should be quoted. For instance:
+
+.nf
+    SallocDefaultCommand = "$SHELL"
+.fi
+
+would run the shell named in the user's $SHELL environment variable, and
+
+.nf
+    SallocDefaultCommand = "xterm \-T Job_$SLURM_JOB_ID"
+.fi
+
+would run \fBxterm\fR with the title set to the SLURM jobid.
 
 .TP
 \fBSchedulerPort\fR
@@ -1086,7 +1146,7 @@ value of \fBSwitchType\fR, records of all jobs in any state may be lost.
 \fBTaskEpilog\fR
 Fully qualified pathname of a program to be execute as the slurm job's
 owner after termination of each task.
-See \fBTaskPlugin\fR for execution order details.
+See \fBTaskProlog\fR for execution order details.
 
 .TP
 \fBTaskPlugin\fR
@@ -1104,27 +1164,6 @@ different operating systems.
 If that is the case, you may want to use Portable Linux 
 Process Affinity (PLPA, see http://www.open-mpi.org/software/plpa), 
 which is supported by SLURM.
-The order of task prolog/epilog execution is as follows:
-.RS
-.TP
-\fB1. pre_launch()\fR: function in TaskPlugin
-.TP
-\fB2. TaskProlog\fR: system\-wide per task program defined in slurm.conf
-.TP
-\fB3. user prolog\fR: job step specific task program defined using 
-\fBsrun\fR's \fB\-\-task\-prolog\fR option or \fBSLURM_TASK_PROLOG\fR 
-environment variable
-.TP
-\fB4.\fR Execute the job step's task
-.TP
-\fB5. user epilog\fR: job step specific task program defined using
-\fBsrun\fR's \fB\-\-task\-epilog\fR option or \fBSLURM_TASK_EPILOG\fR 
-environment variable
-.TP
-\fB6. TaskEpilog\fR: system\-wide per task program defined in slurm.conf
-.TP
-\fB7. post_term()\fR: function in TaskPlugin
-.RE 
 
 .TP
 \fBTaskPluginParam\fR
@@ -1149,7 +1188,27 @@ available to identify the process ID of the task being started.
 Standard output from this program of the form 
 "export NAME=value" will be used to set environment variables 
 for the task being spawned. 
-See \fBTaskPlugin\fR for execution order details.
+The order of task prolog/epilog execution is as follows:
+.RS
+.TP
+\fB1. pre_launch()\fR: function in TaskPlugin
+.TP
+\fB2. TaskProlog\fR: system\-wide per task program defined in slurm.conf
+.TP
+\fB3. user prolog\fR: job step specific task program defined using
+\fBsrun\fR's \fB\-\-task\-prolog\fR option or \fBSLURM_TASK_PROLOG\fR
+environment variable
+.TP
+\fB4.\fR Execute the job step's task
+.TP
+\fB5. user epilog\fR: job step specific task program defined using
+\fBsrun\fR's \fB\-\-task\-epilog\fR option or \fBSLURM_TASK_EPILOG\fR
+environment variable
+.TP
+\fB6. TaskEpilog\fR: system\-wide per task program defined in slurm.conf
+.TP
+\fB7. post_term()\fR: function in TaskPlugin
+.RE
 
 .TP
 \fBTmpFS\fR
@@ -1611,11 +1670,19 @@ is very important.
 \fBSelectTypeParameters\fR should be configured to treat
 memory as a consumable resource and the \fB\-\-mem\fR option
 should be used for job allocations.
-For more information see the following web page:
-\fIhttps://computing.llnl.gov/linux/slurm/cons_res_share.html\fR.
-.na
 Possible values for \fBShared\fR are "EXCLUSIVE", "FORCE", "YES", and "NO".
-.ad
+Sharing of resources is typically useful only when using
+\fBSchedulerType=sched/gang\fR.
+For more information see the following web pages:
+.br
+\fIhttps://computing.llnl.gov/linux/slurm/cons_res.html\fR,
+.br
+\fIhttps://computing.llnl.gov/linux/slurm/cons_res_share.html\fR,
+.br
+\fIhttps://computing.llnl.gov/linux/slurm/gang_scheduling.html\fR, and
+.br
+\fIhttps://computing.llnl.gov/linux/slurm/preempt.html\fR.
+
 .RS
 .TP 12
 \fBEXCLUSIVE\fR
diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5
index e1435514f..ab40162d9 100644
--- a/doc/man/man5/slurmdbd.conf.5
+++ b/doc/man/man5/slurmdbd.conf.5
@@ -5,9 +5,9 @@ slurmdbd.conf \- Slurm Database Daemon (SlurmDBD) configuration file
 .SH "DESCRIPTION"
 \fB/etc/slurmdb.conf\fP is an ASCII file which describes Slurm Database 
 Daemon (SlurmDBD) configuration information.
-.LP
 The file location can be modified at system build time using the
 DEFAULT_SLURM_CONF parameter. 
+.LP
 The contents of the file are case insensitive except for the names of nodes 
 and files. Any text following a "#" in the configuration file is treated 
 as a comment through the end of that line. 
@@ -21,18 +21,6 @@ This file should be protected from unauthorized access since it
 contains a database password.
 The overall configuration parameters available include:
 
-.TP
-\fBAllowView\fR
-This controls who can view accounting records. 
-A value of "user" prevents normal users from viewing accounting records
-that are not generated directly by them (preventing them from viewing 
-any other users jobs).
-A value of "account" prevents normal users from viewing accounting 
-records that are not generated by users in the same account.
-A value of "none" lets any user view accounting records generated by 
-any other user.
-The default value is "none".
-
 .TP
 \fBArchiveAge\fR
 Move data over this age out of the database to an archive.
@@ -142,6 +130,37 @@ This is a colon\-separated list of directories, like the PATH
 environment variable. 
 The default value is "/usr/local/lib/slurm".
 
+.TP
+\fBPrivateData\fR
+This controls what type of information is hidden from regular users.
+By default, all information is visible to all users.
+User \fBSlurmUser\fR, \fBroot\fR, and users with AdminLevel=Admin can always 
+view all information.
+Multiple values may be specified with a comma separator.
+Acceptable values include:
+.RS
+.TP
+\fBaccounts\fR 
+prevents users from viewing any account definitions unless they are 
+coordinators of them.
+.TP
+\fBjobs\fR 
+prevents users from viewing job records belonging
+to other users unless they are coordinators of the association running the job
+when using sacct.
+.TP
+\fBusers\fR  
+prevents users from viewing information about any user 
+other than themselves; this also means users can only see 
+the associations they are involved with.  
+Coordinators can see the associations of all users they coordinate, 
+but can only see themselves when listing users.
+.TP
+\fBusage\fR  
+prevents users from viewing usage of any other user.  
+This applies to sreport.
+.RE
+
 .TP
 \fBSlurmUser\fR
 The name of the user that the \fBslurmctld\fR daemon executes as. 
@@ -194,7 +213,10 @@ should be written to a MySQL database specified by the
 \fStorageLoc\fR parameter.
 The value "accounting_storage/pgsql" indicates that accounting records
 should be written to a PostgreSQL database specified by the 
-\fBStorageLoc\fR parameter.
+\fBStorageLoc\fR parameter.  This plugin is not complete and 
+should not be used if you want to use associations.  It will, however, work with
+basic accounting of jobs and job steps.  If you are interested in 
+completing it, please email slurm-dev@lists.llnl.gov.
 This value must be specified.
 
 .TP
diff --git a/doc/man/man5/wiki.conf.5 b/doc/man/man5/wiki.conf.5
index 5edce905f..ddc81f090 100644
--- a/doc/man/man5/wiki.conf.5
+++ b/doc/man/man5/wiki.conf.5
@@ -4,6 +4,9 @@ wiki.conf \- Slurm configuration file for wiki and wiki2 scheduler plugins
 .SH "DESCRIPTION"
 \fB/etc/wiki.conf\fP is an ASCII file which describes wiki and wiki2 
 scheduler specific SLURM configuration information. 
+The file location can be modified at system build time using the
+DEFAULT_SLURM_CONF parameter. The file will always be located in the 
+same directory as the \fBslurm.conf\fP file. 
 .LP
 Paramter names are case insensitive.
 Any text following a "#" in the configuration file is treated 
diff --git a/etc/init.d.slurmdbd b/etc/init.d.slurmdbd
index 55473e438..0f854892c 100755
--- a/etc/init.d.slurmdbd
+++ b/etc/init.d.slurmdbd
@@ -21,8 +21,9 @@
 
 CONFDIR=/etc/slurm
 SBINDIR=/usr/sbin
+LIBDIR=/usr/lib
 
-# Source function library.
+#Source function library.
 if [ -f /etc/rc.status ]; then
    . /etc/rc.status
    SUSE=1
@@ -44,6 +45,12 @@ else
    RETVAL=0
 fi
 
+# We can not use a starter program without losing environment 
+# variables that are critical on Blue Gene systems
+if [ -d /bgl/BlueLight/ppcfloor ]; then
+   STARTPROC=""
+fi
+
 # Source slurm specific configuration
 if [ -f /etc/sysconfig/slurm ] ; then
     . /etc/sysconfig/slurm
@@ -53,18 +60,23 @@ fi
 
 [ -f $CONFDIR/slurmdbd.conf ] || exit 1
 
+# setup library paths for slurm and munge support
+export LD_LIBRARY_PATH="$LIBDIR:$LD_LIBRARY_PATH"
+
 start() {
-    echo -n "starting slurmdbd: " 
+    prog=$1
+    shift
+    echo -n "starting $prog: " 
     unset HOME MAIL USER USERNAME 
-    $STARTPROC $SBINDIR/slurmdbd $SLURMDBD_OPTIONS
+    $STARTPROC $SBINDIR/$prog $SLURMDBD_OPTIONS
     rc_status -v
     echo
     touch /var/lock/subsys/slurmdbd
 }
 
 stop() { 
-    echo -n "stopping slurmdbd: "
-    killproc slurmdbd -TERM
+    echo -n "stopping $1: "
+    killproc $1 -TERM
     rc_status -v
     echo
     rm -f /var/lock/subsys/slurmdbd
@@ -76,7 +88,7 @@ slurmstatus() {
     local rpid
     local pidfile
 
-    pidfile=`grep -i PidFile $CONFDIR/slurmdbd.conf | grep -v '^ *#'`
+    pidfile=`grep -i pidfile $CONFDIR/slurmdbd.conf | grep -v '^ *#'`
     if [ $? = 0 ]; then
         pidfile=${pidfile##*=}
         pidfile=${pidfile%#*}
@@ -84,29 +96,51 @@ slurmstatus() {
         pidfile=/var/run/slurmdbd.pid
     fi
 
-    pid=`pidof -o $$ -o $$PPID -o %PPID -x slurmdbd`
+   pid=`pidof -o $$ -o $$PPID -o %PPID -x $1 || \
+         pidof -o $$ -o $$PPID -o %PPID -x ${base}`
 
     if [ -f $pidfile ]; then
         read rpid < $pidfile
         if [ "$rpid" != "" -a "$pid" != "" ]; then
             for i in $pid ; do
                 if [ "$i" = "$rpid" ]; then 
-                    echo $"slurmdbd (pid $pid) is running..."
+                    echo $"${base} (pid $pid) is running..."
                     return 0
                 fi     
             done
         elif [ "$rpid" != "" -a "$pid" = "" ]; then
-            echo $"slurmdbd is stopped"
-            return 1
+		echo $"${base} dead but pid file exists"
+		return 1
         fi 
 
     fi
-     
-    echo $"slurmdbd is stopped"
+
+    if [ "$base" = "slurmdbd" -a "$pid" != "" ] ; then
+        echo $"${base} (pid $pid) is running..."
+        return 0
+    fi
+    
+    echo $"${base} is stopped"
     
     return 3
 }
 
+#
+# stop slurm daemons, 
+# wait for termination to complete (up to 10 seconds) before returning
+#
+slurmstop() {
+	stop $1
+	
+	for i in 1 2 3 4
+	do
+		sleep $i
+		slurmstatus $1
+		if [ $? != 0 ]; then
+			break
+		fi
+	done
+}
 #
 # The pathname substitution in daemon command assumes prefix and
 # exec_prefix are same.  This is the default, unless the user requests
@@ -119,14 +153,14 @@ case "$1" in
 	start slurmdbd
         ;;
     stop)
-	stop slurmdbd
+	slurmstop slurmdbd
         ;;
     status)
 	slurmstatus slurmdbd
         ;;
     restart)
-	stop slurmdbd
-	start slurmdbd
+	$0 stop
+	$0 start
         ;;
     condrestart)
         if [ -f /var/lock/subsys/slurm ]; then
diff --git a/slurm.spec b/slurm.spec
index b452bc5ef..7f2f983f3 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -1,4 +1,4 @@
-# $Id: slurm.spec 14836 2008-08-21 15:58:32Z jette $
+# $Id: slurm.spec 15324 2008-10-07 00:16:53Z da $
 #
 # Note that this package is not relocatable
 
@@ -71,14 +71,14 @@
 %endif
 
 Name:    slurm
-Version: 1.3.8
+Version: 1.3.9
 Release: 1%{?dist}
 
 Summary: Simple Linux Utility for Resource Management
 
 License: GPL 
 Group: System Environment/Base
-Source: slurm-1.3.8.tar.bz2
+Source: slurm-1.3.9.tar.bz2
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
 URL: https://computing.llnl.gov/linux/slurm/
 
@@ -256,7 +256,7 @@ SLURM process tracking plugin for SGI job containers.
 #############################################################################
 
 %prep
-%setup -n slurm-1.3.8
+%setup -n slurm-1.3.9
 
 %build
 %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \
@@ -315,8 +315,6 @@ test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_affinity.so &&
    echo %{_libdir}/slurm/task_affinity.so >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_openssl.so &&
    echo %{_libdir}/slurm/crypto_openssl.so >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/accounting_storage_gold.so
-   echo %{_libdir}/slurm/accounting_storage_gold.so >> $LIST
 
 
 #############################################################################
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index 142db9666..0954fc814 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -760,6 +760,7 @@ typedef struct {
 				   to a node than available processors,
 				   "false" to accept at most one task per
 				   processor. "false" by default. */
+	bool no_kill;		/* true if no kill on node failure */
 	uint16_t ckpt_interval;	/* checkpoint interval in minutes */
 	char *ckpt_path;	/* path to store checkpoint image files */
 	uint16_t verbose_level; /* for extra logging decisions in step
@@ -1063,6 +1064,7 @@ typedef struct slurm_ctl_conf {
                                       * they are considered "unkillable". */
 	uint16_t use_pam;	/* enable/disable PAM support */
 	uint16_t wait_time;	/* default job --wait time */
+	char *salloc_default_command; /* default salloc command */
 } slurm_ctl_conf_t;
 
 typedef struct slurmd_status_msg {
diff --git a/src/api/allocate.c b/src/api/allocate.c
index 0fd089694..0fea867c2 100644
--- a/src/api/allocate.c
+++ b/src/api/allocate.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  allocate.c - allocate nodes for a job or step with supplied contraints
- *  $Id: allocate.c 14571 2008-07-18 22:25:56Z jette $
+ *  $Id: allocate.c 14992 2008-09-05 20:10:34Z da $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -262,7 +262,7 @@ slurm_allocate_resources_blocking (const job_desc_msg_t *user_req,
 							     timeout);
 			/* If NULL, we didn't get the allocation in 
 			   the time desired, so just free the job id */
-			if (resp == NULL) {
+			if (resp == NULL && errno != ESLURM_ALREADY_DONE) {
 				errnum = errno;
 				slurm_complete_job(job_id, -1);
 			}
diff --git a/src/api/pmi_server.c b/src/api/pmi_server.c
index ee0122497..d621d590e 100644
--- a/src/api/pmi_server.c
+++ b/src/api/pmi_server.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  pmi_server.c - Global PMI data as maintained within srun
- *  $Id: pmi_server.c 14078 2008-05-19 23:56:20Z jette $
+ *  $Id: pmi_server.c 15376 2008-10-10 19:28:11Z da $
  *****************************************************************************
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -168,8 +168,8 @@ static void *_msg_thread(void *x)
 
 	slurm_mutex_lock(&agent_mutex);
 	agent_cnt--;
-	slurm_mutex_unlock(&agent_mutex);
 	pthread_cond_signal(&agent_cond);
+	slurm_mutex_unlock(&agent_mutex);
 	xfree(x);
 	return NULL;
 }
diff --git a/src/api/step_ctx.c b/src/api/step_ctx.c
index 9cc2ddc8b..95956f3ac 100644
--- a/src/api/step_ctx.c
+++ b/src/api/step_ctx.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  step_ctx.c - step_ctx task functions for use by AIX/POE
  *
- *  $Id: step_ctx.c 14469 2008-07-09 18:15:23Z jette $
+ *  $Id: step_ctx.c 15262 2008-10-01 22:58:26Z jette $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -140,6 +140,7 @@ slurm_step_ctx_create (const slurm_step_ctx_params_t *step_params)
 	ctx->magic	= STEP_CTX_MAGIC;
 	ctx->job_id	= step_req->job_id;
 	ctx->user_id	= step_req->user_id;
+	ctx->no_kill	= step_params->no_kill;
 	ctx->step_req   = step_req;
 	ctx->step_resp	= step_resp;
 	ctx->verbose_level = step_params->verbose_level;
@@ -214,6 +215,7 @@ slurm_step_ctx_create_no_alloc (const slurm_step_ctx_params_t *step_params,
 	ctx->magic	= STEP_CTX_MAGIC;
 	ctx->job_id	= step_req->job_id;
 	ctx->user_id	= step_req->user_id;
+	ctx->no_kill	= step_params->no_kill;
 	ctx->step_req   = step_req;
 	ctx->step_resp	= step_resp;
 	ctx->verbose_level = step_params->verbose_level;
@@ -400,15 +402,16 @@ slurm_step_ctx_daemon_per_node_hack(slurm_step_ctx_t *ctx)
 
 	/* hack the context step layout */
 	old_layout = ctx->step_resp->step_layout;
-	new_layout = (slurm_step_layout_t *)xmalloc(sizeof(slurm_step_layout_t));
+	new_layout = (slurm_step_layout_t *)
+		     xmalloc(sizeof(slurm_step_layout_t));
 	new_layout->node_cnt = old_layout->node_cnt;
 	new_layout->task_cnt = old_layout->node_cnt;
 	new_layout->node_list = xstrdup(old_layout->node_list);
 	slurm_step_layout_destroy(old_layout);
-	new_layout->tasks =
-		(uint16_t *)xmalloc(sizeof(uint16_t) * new_layout->node_cnt);
-	new_layout->tids =
-		(uint32_t **)xmalloc(sizeof(uint32_t *) * new_layout->node_cnt);
+	new_layout->tasks = (uint16_t *) xmalloc(sizeof(uint16_t) * 
+						 new_layout->node_cnt);
+	new_layout->tids = (uint32_t **) xmalloc(sizeof(uint32_t *) * 
+						 new_layout->node_cnt);
 	for (i = 0; i < new_layout->node_cnt; i++) {
 		new_layout->tasks[i] = 1;
 		new_layout->tids[i] = (uint32_t *)xmalloc(sizeof(uint32_t));
diff --git a/src/api/step_ctx.h b/src/api/step_ctx.h
index 7b1ae2a5c..74dc6e397 100644
--- a/src/api/step_ctx.h
+++ b/src/api/step_ctx.h
@@ -54,7 +54,8 @@ struct slurm_step_ctx_struct {
 	/* Used by slurm_step_launch() */
 	struct step_launch_state *launch_state;
 	uint16_t verbose_level; /* for extra logging decisions in step
-				   launch api */
+				 * launch api */
+	bool no_kill;		/* if set, don't kill step on node DOWN */
 };
 
 #endif /* _STEP_CTX_H */
diff --git a/src/api/step_launch.c b/src/api/step_launch.c
index 2533db78b..482ce3be7 100644
--- a/src/api/step_launch.c
+++ b/src/api/step_launch.c
@@ -317,7 +317,8 @@ int slurm_step_launch_wait_start(slurm_step_ctx_t *ctx)
 		if (sls->abort) {
 			if (!sls->abort_action_taken) {
 				slurm_kill_job_step(ctx->job_id,
-						    ctx->step_resp->job_step_id,
+						    ctx->step_resp->
+						    job_step_id,
 						    SIGKILL);
 				sls->abort_action_taken = true;
 			}
@@ -366,7 +367,8 @@ void slurm_step_launch_wait_finish(slurm_step_ctx_t *ctx)
 		} else {
 			if (!sls->abort_action_taken) {
 				slurm_kill_job_step(ctx->job_id,
-						    ctx->step_resp->job_step_id,
+						    ctx->step_resp->
+						    job_step_id,
 						    SIGKILL);
 				sls->abort_action_taken = true;
 			}
@@ -399,14 +401,18 @@ void slurm_step_launch_wait_finish(slurm_step_ctx_t *ctx)
 					ctx->job_id,
 					ctx->step_resp->job_step_id,
 					SIGKILL);
-				if (!sls->user_managed_io)
-					client_io_handler_abort(sls->io.normal);
+				if (!sls->user_managed_io) {
+					client_io_handler_abort(sls->
+								io.normal);
+				}
 				break;
 			} else if (errnum != 0) {
 				error("Error waiting on condition in"
 				      " slurm_step_launch_wait_finish: %m");
-				if (!sls->user_managed_io)
-					client_io_handler_abort(sls->io.normal);
+				if (!sls->user_managed_io) {
+					client_io_handler_abort(sls->
+								io.normal);
+				}
 				break;
 			}
 		}
@@ -565,6 +571,7 @@ struct step_launch_state *step_launch_state_create(slurm_step_ctx_t *ctx)
 	sls->resp_port = NULL;
 	sls->abort = false;
 	sls->abort_action_taken = false;
+	sls->no_kill = ctx->no_kill;
 	sls->mpi_info->jobid = ctx->step_req->job_id;
 	sls->mpi_info->stepid = ctx->step_resp->job_step_id;
 	sls->mpi_info->step_layout = layout;
@@ -627,7 +634,8 @@ static int _msg_thr_create(struct step_launch_state *sls, int num_nodes)
 	sls->resp_port = xmalloc(sizeof(uint16_t) * sls->num_resp_port);
 	for (i = 0; i < sls->num_resp_port; i++) {
 		if (net_stream_listen(&sock, &port) < 0) {
-			error("unable to intialize step launch listening socket: %m");
+			error("unable to initialize step launch listening "
+			      "socket: %m");
 			return SLURM_ERROR;
 		}
 		sls->resp_port[i] = port;
@@ -848,6 +856,15 @@ _node_fail_handler(struct step_launch_state *sls, slurm_msg_t *fail_msg)
 	int node_id, num_tasks;
 
 	error("Node failure on %s", nf->nodelist);
+	if (!sls->no_kill) {
+		info("Cancelling job step %u.%u", nf->job_id, nf->step_id);
+		slurm_kill_job_step(nf->job_id, nf->step_id, SIGKILL);
+		/* In an ideal world, we close the socket to this node and
+		 * normally terminate the remaining tasks. In practice this
+		 * is very difficult. The exercise is left to the reader. */
+		exit(1);
+	}
+
 	fail_nodes = hostset_create(nf->nodelist);
 	fail_itr = hostset_iterator_create(fail_nodes);
 	num_node_ids = hostset_count(fail_nodes);
diff --git a/src/api/step_launch.h b/src/api/step_launch.h
index 529193eb6..e4d99903e 100644
--- a/src/api/step_launch.h
+++ b/src/api/step_launch.h
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  step_launch.h - launch a parallel job step
  *
- *  $Id: step_launch.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: step_launch.h 15262 2008-10-01 22:58:26Z jette $
  *****************************************************************************
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -58,6 +58,7 @@ struct step_launch_state {
 	bitstr_t *tasks_exited;  /* or never started correctly */
 	bool abort;
 	bool abort_action_taken;
+	bool no_kill;
 
 	/* message thread variables */
 	eio_handle_t *msg_handle;
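[Hedged sketch, not part of the patch] The no_kill flag added above is threaded from slurm_step_ctx_params_t through the step context into step_launch_state, so _node_fail_handler() only SIGKILLs the step when the flag is unset. A minimal sketch of how a launcher could request that behavior, using only the field and API shown in this patch; the helper name is an illustrative assumption:

#include <stdbool.h>
#include <slurm/slurm.h>

/* Illustrative helper: create a step context that survives node failures. */
static slurm_step_ctx_t *create_no_kill_step(slurm_step_ctx_params_t *params)
{
	params->no_kill = true;	/* new field: don't kill the step on node DOWN */
	return slurm_step_ctx_create(params);
}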
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index 3a41b90a1..1e4461899 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -39,6 +39,7 @@
 
 #include <sys/types.h>
 #include <pwd.h>
+#include <fcntl.h>
 
 #include "src/common/uid.h"
 #include "src/common/xstring.h"
@@ -54,26 +55,115 @@ void (*remove_assoc_notify) (acct_association_rec_t *rec) = NULL;
 static pthread_mutex_t local_association_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t local_qos_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t local_user_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t local_file_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static int _grab_parents_qos(acct_association_rec_t *assoc)
+{
+	acct_association_rec_t *parent_assoc = NULL;
+	char *qos_char = NULL;
+	ListIterator itr = NULL;
+
+	if(!assoc)
+		return SLURM_ERROR;
+
+	if(assoc->qos_list)
+		list_flush(assoc->qos_list);
+	else
+		assoc->qos_list = list_create(slurm_destroy_char);
+
+	parent_assoc = assoc->parent_assoc_ptr;
+
+	if(!parent_assoc || !parent_assoc->qos_list
+	   || !list_count(parent_assoc->qos_list)) 
+		return SLURM_SUCCESS;
+	
+	itr = list_iterator_create(parent_assoc->qos_list);
+	while((qos_char = list_next(itr))) 
+		list_append(assoc->qos_list, xstrdup(qos_char));
+	list_iterator_destroy(itr);
+
+	return SLURM_SUCCESS;
+}
+
+static int _local_update_assoc_qos_list(acct_association_rec_t *assoc, 
+					List new_qos_list)
+{
+	ListIterator new_qos_itr = NULL, curr_qos_itr = NULL;
+	char *new_qos = NULL, *curr_qos = NULL;
+	int flushed = 0;
+
+	if(!assoc || !new_qos_list) {
+		error("need both new qos_list and an association to update");
+		return SLURM_ERROR;
+	}
+	
+	if(!list_count(new_qos_list)) {
+		_grab_parents_qos(assoc);
+		return SLURM_SUCCESS;
+	}			
+
+	new_qos_itr = list_iterator_create(new_qos_list);
+	curr_qos_itr = list_iterator_create(assoc->qos_list);
+	
+	while((new_qos = list_next(new_qos_itr))) {
+		if(new_qos[0] == '-') {
+			while((curr_qos = list_next(curr_qos_itr))) {
+				if(!strcmp(curr_qos, new_qos+1)) {
+					list_delete_item(curr_qos_itr);
+					break;
+				}
+			}
+
+			list_iterator_reset(curr_qos_itr);
+		} else if(new_qos[0] == '+') {
+			while((curr_qos = list_next(curr_qos_itr))) 
+				if(!strcmp(curr_qos, new_qos+1)) 
+					break;
+			
+			if(!curr_qos) {
+				list_append(assoc->qos_list,
+					    xstrdup(new_qos+1));
+				list_iterator_reset(curr_qos_itr);
+			}
+		} else if(new_qos[0] == '=') {
+			if(!flushed)
+				list_flush(assoc->qos_list);
+			list_append(assoc->qos_list, xstrdup(new_qos+1));
+			flushed = 1;
+		} else if(new_qos[0]) {
+			if(!flushed)
+				list_flush(assoc->qos_list);
+			list_append(assoc->qos_list, xstrdup(new_qos));
+			flushed = 1;			
+		}
+	}
+	list_iterator_destroy(curr_qos_itr);
+	list_iterator_destroy(new_qos_itr);
+
+	return SLURM_SUCCESS;	
+}
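[Hedged sketch, not part of the patch] _local_update_assoc_qos_list() above treats each incoming name as an edit operation based on its first character: '-' removes the QOS, '+' adds it, and '=' (or a bare name) flushes the existing list before appending. A sketch of building such a delta list with the list primitives already used in this file; the scenario and QOS names are illustrative only:

/* Illustrative only: {"=normal", "+expedite"} should leave an association
 * with qos_list = {"normal", "expedite"} after the update runs. */
static List _example_qos_delta(void)
{
	List new_qos_list = list_create(slurm_destroy_char);

	list_append(new_qos_list, xstrdup("=normal"));   /* replace old list */
	list_append(new_qos_list, xstrdup("+expedite")); /* then add one more */

	return new_qos_list;
}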
 
 /* locks should be put in place before calling this function */
-static int _set_assoc_parent_and_user(acct_association_rec_t *assoc)
+static int _set_assoc_parent_and_user(acct_association_rec_t *assoc,
+				      List assoc_list)
 {
-	if(!assoc) {
+	if(!assoc || !assoc_list) {
 		error("you didn't give me an association");
 		return SLURM_ERROR;
 	}
 
 	if(assoc->parent_id) {
 		acct_association_rec_t *assoc2 = NULL;
-		ListIterator itr = list_iterator_create(local_association_list);
+		ListIterator itr = list_iterator_create(assoc_list);
 		while((assoc2 = list_next(itr))) {
 			if(assoc2->id == assoc->parent_id) {
-				assoc->parent_acct_ptr = assoc2;
+				assoc->parent_assoc_ptr = assoc2;
 				break;
 			}
 		}
 		list_iterator_destroy(itr);
 	}
+
 	if(assoc->user) {
 		uid_t pw_uid = uid_from_string(assoc->user);
 		if(pw_uid == (uid_t) -1) 
@@ -88,6 +178,43 @@ static int _set_assoc_parent_and_user(acct_association_rec_t *assoc)
 	return SLURM_SUCCESS;
 }
 
+static int _post_association_list(List assoc_list)
+{
+	acct_association_rec_t *assoc = NULL;
+	ListIterator itr = NULL;
+	//DEF_TIMERS;
+
+	if(!assoc_list)
+		return SLURM_ERROR;
+
+	itr = list_iterator_create(assoc_list);
+	//START_TIMER;
+	while((assoc = list_next(itr))) 
+		_set_assoc_parent_and_user(assoc, assoc_list);
+	list_iterator_destroy(itr);
+	//END_TIMER2("load_associations");
+	return SLURM_SUCCESS;
+}
+	
+static int _post_user_list(List user_list)
+{
+	acct_user_rec_t *user = NULL;
+	ListIterator itr = list_iterator_create(user_list);
+	//START_TIMER;
+	while((user = list_next(itr))) {
+		uid_t pw_uid = uid_from_string(user->name);
+		if(pw_uid == (uid_t) -1) {
+			if(slurmdbd_conf)
+				debug("couldn't get a uid for user %s",
+				      user->name);
+			user->uid = (uint32_t)NO_VAL;
+		} else
+			user->uid = pw_uid;
+	}
+	list_iterator_destroy(itr);
+	return SLURM_SUCCESS;
+}
+
 static int _get_local_association_list(void *db_conn, int enforce)
 {
 	acct_association_cond_t assoc_q;
@@ -135,15 +262,10 @@ static int _get_local_association_list(void *db_conn, int enforce)
 			       "list was given so we are giving a blank list");
 			return SLURM_SUCCESS;
 		}
-	} else {
-		acct_association_rec_t *assoc = NULL;
-		ListIterator itr = list_iterator_create(local_association_list);
-		//START_TIMER;
-		while((assoc = list_next(itr))) 
-			_set_assoc_parent_and_user(assoc);
-		list_iterator_destroy(itr);
-		//END_TIMER2("load_associations");
-	}
+	} 
+
+	_post_association_list(local_association_list);
+
 	slurm_mutex_unlock(&local_association_lock);
 
 	return SLURM_SUCCESS;
@@ -161,8 +283,7 @@ static int _get_local_qos_list(void *db_conn, int enforce)
 	if(!local_qos_list) {
 		slurm_mutex_unlock(&local_qos_lock);
 		if(enforce) {
-			error("_get_local_qos_list: "
-			      "no list was made.");
+			error("_get_local_qos_list: no list was made.");
 			return SLURM_ERROR;
 		} else {
 			return SLURM_SUCCESS;
@@ -195,57 +316,201 @@ static int _get_local_user_list(void *db_conn, int enforce)
 		} else {
 			return SLURM_SUCCESS;
 		}		
-	} else {
-		acct_user_rec_t *user = NULL;
-		ListIterator itr = list_iterator_create(local_user_list);
-		//START_TIMER;
-		while((user = list_next(itr))) {
-			uid_t pw_uid = uid_from_string(user->name);
-			if(pw_uid == (uid_t) -1) {
-				debug("couldn't get a uid for user %s",
-				      user->name);
-				user->uid = (uint32_t)NO_VAL;
-			} else
-				user->uid = pw_uid;
+	} 
+
+	_post_user_list(local_user_list);
+	
+	slurm_mutex_unlock(&local_user_lock);
+	return SLURM_SUCCESS;
+}
+
+static int _refresh_local_association_list(void *db_conn, int enforce)
+{
+	acct_association_cond_t assoc_q;
+	List current_assocs = NULL;
+	char *cluster_name = NULL;
+	uid_t uid = getuid();
+	ListIterator curr_itr = NULL;
+	ListIterator local_itr = NULL;
+	acct_association_rec_t *curr_assoc = NULL, *assoc = NULL;
+//	DEF_TIMERS;
+
+	memset(&assoc_q, 0, sizeof(acct_association_cond_t));
+	if(local_cluster_name) {
+		assoc_q.cluster_list = list_create(slurm_destroy_char);
+		cluster_name = xstrdup(local_cluster_name);
+		if(!cluster_name) {
+			if(enforce && !slurmdbd_conf) {
+				error("_refresh_local_association_list: "
+				      "no cluster name here, going to get "
+				      "all associations.");
+			}
+		} else 
+			list_append(assoc_q.cluster_list, cluster_name);
+	}
+
+
+	slurm_mutex_lock(&local_association_lock);
+
+	current_assocs = local_association_list;
+
+//	START_TIMER;
+	local_association_list = 
+		acct_storage_g_get_associations(db_conn, uid, &assoc_q);
+//	END_TIMER2("get_associations");
+
+	if(assoc_q.cluster_list)
+		list_destroy(assoc_q.cluster_list);
+	
+	if(!local_association_list) {
+		local_association_list = current_assocs;
+		slurm_mutex_unlock(&local_association_lock);
+		
+		error("_refresh_local_association_list: "
+		      "no new list given back, keeping cached one.");
+		return SLURM_ERROR;
+	}
+ 
+	_post_association_list(local_association_list);
+	
+	if(!current_assocs) {
+		slurm_mutex_unlock(&local_association_lock);
+		return SLURM_SUCCESS;
+	}
+	
+	curr_itr = list_iterator_create(current_assocs);
+	local_itr = list_iterator_create(local_association_list);
+	/* carry over current usage from the cached associations */
+	while((curr_assoc = list_next(curr_itr))) {
+		while((assoc = list_next(local_itr))) {
+			if(assoc->id == curr_assoc->id) 
+				break;
 		}
-		list_iterator_destroy(itr);
-		//END_TIMER2("load_users");
+		
+		if(!assoc) 
+			continue;
+		assoc->used_jobs = curr_assoc->used_jobs;
+		assoc->used_submit_jobs = curr_assoc->used_submit_jobs;
+		assoc->used_shares = curr_assoc->used_shares;
+		list_iterator_reset(local_itr);			
+	}
+	
+	list_iterator_destroy(curr_itr);
+	list_iterator_destroy(local_itr);
+		
+	slurm_mutex_unlock(&local_association_lock);
+
+	if(current_assocs)
+		list_destroy(current_assocs);
+
+	return SLURM_SUCCESS;
+}
+
+/* This only gets a new list if available, dropping the old one if
+ * needed.
+ */
+static int _refresh_local_qos_list(void *db_conn, int enforce)
+{
+	List current_qos = NULL;
+	uid_t uid = getuid();
+
+	current_qos = acct_storage_g_get_qos(db_conn, uid, NULL);
+
+	if(!current_qos) {
+		error("_refresh_local_qos_list: "
+		      "no new list given back, keeping cached one.");
+		return SLURM_ERROR;
 	}
 
+	slurm_mutex_lock(&local_qos_lock);
+	if(local_qos_list)
+		list_destroy(local_qos_list);
+
+	local_qos_list = current_qos;
+
+	slurm_mutex_unlock(&local_qos_lock);
+
+	return SLURM_SUCCESS;
+}
+
+/* This only gets a new list if available, dropping the old one if
+ * needed.
+ */
+static int _refresh_local_user_list(void *db_conn, int enforce)
+{
+	List current_users = NULL;
+	acct_user_cond_t user_q;
+	uid_t uid = getuid();
+
+	memset(&user_q, 0, sizeof(acct_user_cond_t));
+	user_q.with_coords = 1;
+	
+	current_users = acct_storage_g_get_users(db_conn, uid, &user_q);
+
+	if(!current_users) {
+		error("_refresh_local_user_list: "
+		      "no new list given back, keeping cached one.");
+		return SLURM_ERROR;
+	}
+	_post_user_list(current_users);
+
+	slurm_mutex_lock(&local_user_lock);
+
+	if(local_user_list) 
+		list_destroy(local_user_list);
+
+	local_user_list = current_users;
+	
 	slurm_mutex_unlock(&local_user_lock);
+
 	return SLURM_SUCCESS;
 }
 
 extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args)
 {
-	int enforce = 0;
+	static uint16_t enforce = 0;
+	static uint16_t cache_level = ASSOC_MGR_CACHE_ALL;
 
 	if(args) {
 		enforce = args->enforce;
 		if(args->remove_assoc_notify)
 			remove_assoc_notify = args->remove_assoc_notify;
+		cache_level = args->cache_level;
+		assoc_mgr_refresh_lists(db_conn, args);	
+	}
+	
+	if(running_cache) { 
+		debug4("No need to run assoc_mgr_init, "
+		       "we probably don't have a connection.  "
+		       "If we do, use assoc_mgr_refresh_lists instead.");
+		return SLURM_SUCCESS;
 	}
 
-	if(!local_cluster_name && !slurmdbd_conf)
+	if((!local_cluster_name) && !slurmdbd_conf) {
+		xfree(local_cluster_name);
 		local_cluster_name = slurm_get_cluster_name();
+	}
 
-	if(!local_association_list) 
+	if((!local_association_list) && (cache_level & ASSOC_MGR_CACHE_ASSOC)) 
 		if(_get_local_association_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if(!local_qos_list) 
+	if((!local_qos_list) && (cache_level & ASSOC_MGR_CACHE_QOS))
 		if(_get_local_qos_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if(!local_user_list) 
+	if((!local_user_list) && (cache_level & ASSOC_MGR_CACHE_USER))
 		if(_get_local_user_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
 }
 
-extern int assoc_mgr_fini(void)
+extern int assoc_mgr_fini(char *state_save_location)
 {
+	if(state_save_location)
+		dump_assoc_mgr_state(state_save_location);
+
 	if(local_association_list) 
 		list_destroy(local_association_list);
 	if(local_qos_list)
@@ -311,7 +576,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 	}
 /* 	info("looking for assoc of user=%s(%u), acct=%s, " */
 /* 	     "cluster=%s, partition=%s", */
-/* 	     assoc->user, assoc->uid, assoc->acct,  */
+/* 	     assoc->user, assoc->uid, assoc->acct, */
 /* 	     assoc->cluster, assoc->partition); */
 	slurm_mutex_lock(&local_association_lock);
 	itr = list_iterator_create(local_association_list);
@@ -382,17 +647,31 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 		assoc->cluster = ret_assoc->cluster;
 	if(!assoc->partition)
 		assoc->partition = ret_assoc->partition;
-	assoc->fairshare                 = ret_assoc->fairshare;
-	assoc->max_cpu_secs_per_job      = ret_assoc->max_cpu_secs_per_job;
-	assoc->max_jobs                  = ret_assoc->max_jobs;
-	assoc->max_nodes_per_job         = ret_assoc->max_nodes_per_job;
-	assoc->max_wall_duration_per_job = ret_assoc->max_wall_duration_per_job;
-	assoc->parent_acct_ptr           = ret_assoc->parent_acct_ptr;
+
+	assoc->fairshare       = ret_assoc->fairshare;
+
+	assoc->grp_cpu_mins   = ret_assoc->grp_cpu_mins;
+	assoc->grp_cpus        = ret_assoc->grp_cpus;
+	assoc->grp_jobs        = ret_assoc->grp_jobs;
+	assoc->grp_nodes       = ret_assoc->grp_nodes;
+	assoc->grp_submit_jobs = ret_assoc->grp_submit_jobs;
+	assoc->grp_wall        = ret_assoc->grp_wall;
+
+	assoc->max_cpu_mins_pj = ret_assoc->max_cpu_mins_pj;
+	assoc->max_cpus_pj     = ret_assoc->max_cpus_pj;
+	assoc->max_jobs        = ret_assoc->max_jobs;
+	assoc->max_nodes_pj    = ret_assoc->max_nodes_pj;
+	assoc->max_submit_jobs = ret_assoc->max_submit_jobs;
+	assoc->max_wall_pj     = ret_assoc->max_wall_pj;
+
 	if(assoc->parent_acct) {
 		xfree(assoc->parent_acct);
 		assoc->parent_acct       = xstrdup(ret_assoc->parent_acct);
 	} else 
 		assoc->parent_acct       = ret_assoc->parent_acct;
+
+	assoc->parent_assoc_ptr          = ret_assoc->parent_assoc_ptr;
+
 	slurm_mutex_unlock(&local_association_lock);
 
 	return SLURM_SUCCESS;
@@ -515,8 +794,10 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 	while((object = list_pop(update->objects))) {
 		if(object->cluster && local_cluster_name) {
 			/* only update the local clusters assocs */
-			if(strcasecmp(object->cluster, local_cluster_name))
+			if(strcasecmp(object->cluster, local_cluster_name)) {
+				destroy_acct_association_rec(object);	
 				continue;
+			}
 		}
 		list_iterator_reset(itr);
 		while((rec = list_next(itr))) {
@@ -570,28 +851,35 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				break;
 			}
 			debug("updating assoc %u", rec->id);
-			if(object->fairshare != NO_VAL) {
+			if(object->fairshare != NO_VAL) 
 				rec->fairshare = object->fairshare;
-			}
-
-			if(object->max_jobs != NO_VAL) {
+			
+			if(object->grp_cpu_mins != NO_VAL) 
+				rec->grp_cpu_mins = object->grp_cpu_mins;
+			if(object->grp_cpus != NO_VAL) 
+				rec->grp_cpus = object->grp_cpus;
+			if(object->grp_jobs != NO_VAL) 
+				rec->grp_jobs = object->grp_jobs;
+			if(object->grp_nodes != NO_VAL) 
+				rec->grp_nodes = object->grp_nodes;
+			if(object->grp_submit_jobs != NO_VAL) 
+				rec->grp_submit_jobs = object->grp_submit_jobs;
+			if(object->grp_wall != NO_VAL) 
+				rec->grp_wall = object->grp_wall;
+			
+			if(object->max_cpu_mins_pj != NO_VAL) 
+				rec->max_cpu_mins_pj = object->max_cpu_mins_pj;
+			if(object->max_cpus_pj != NO_VAL) 
+				rec->max_cpus_pj = object->max_cpus_pj;
+			if(object->max_jobs != NO_VAL) 
 				rec->max_jobs = object->max_jobs;
-			}
-
-			if(object->max_nodes_per_job != NO_VAL) {
-				rec->max_nodes_per_job =
-					object->max_nodes_per_job;
-			}
-
-			if(object->max_wall_duration_per_job != NO_VAL) {
-				rec->max_wall_duration_per_job =
-					object->max_wall_duration_per_job;
-			}
-
-			if(object->max_cpu_secs_per_job != NO_VAL) {
-				rec->max_cpu_secs_per_job = 
-					object->max_cpu_secs_per_job;
-			}
+			if(object->max_nodes_pj != NO_VAL) 
+				rec->max_nodes_pj = object->max_nodes_pj;
+			if(object->max_submit_jobs != NO_VAL) 
+				rec->max_submit_jobs = object->max_submit_jobs;
+			if(object->max_wall_pj != NO_VAL) 
+				rec->max_wall_pj = object->max_wall_pj;
+			
 
 			if(object->parent_acct) {
 				xfree(rec->parent_acct);
@@ -604,15 +892,30 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				parents_changed = 1;
 				
 			}
-			log_assoc_rec(rec);
+
+			if(object->qos_list) {
+				if(rec->qos_list) {
+					_local_update_assoc_qos_list(
+						rec, object->qos_list);
+				} else {
+					rec->qos_list = object->qos_list;
+					object->qos_list = NULL;
+				}
+			}
+			
+			slurm_mutex_lock(&local_qos_lock);
+			log_assoc_rec(rec, local_qos_list);
+			slurm_mutex_unlock(&local_qos_lock);
 			break;
 		case ACCT_ADD_ASSOC:
 			if(rec) {
 				//rc = SLURM_ERROR;
 				break;
 			}
-			_set_assoc_parent_and_user(object);
 			list_append(local_association_list, object);
+			object = NULL;
+			parents_changed = 1; // set since we need to
+					     // set the parent
 			break;
 		case ACCT_REMOVE_ASSOC:
 			if(!rec) {
@@ -626,28 +929,18 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 		default:
 			break;
 		}
-		if(update->type != ACCT_ADD_ASSOC) {
-			destroy_acct_association_rec(object);			
-		}				
+		
+		destroy_acct_association_rec(object);			
 	}
-
+		
+	/* We have to do this after the entire list is processed since
+	 * we may have added the parent which wasn't in the list before
+	 */
 	if(parents_changed) {
-		ListIterator itr2 = 
-			list_iterator_create(local_association_list);
 		list_iterator_reset(itr);
-
-		while((object = list_next(itr))) {
-			if(object->parent_id) {
-				while((rec = list_next(itr2))) {
-					if(rec->id == object->parent_id) {
-						object->parent_acct_ptr = rec;
-						break;
-					}
-				}
-				list_iterator_reset(itr2);
-			}
-		}
-		list_iterator_destroy(itr2);
+		while((object = list_next(itr))) 
+			_set_assoc_parent_and_user(
+				object, local_association_list);
 	}
 
 	list_iterator_destroy(itr);
@@ -673,10 +966,10 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 	while((object = list_pop(update->objects))) {
 		list_iterator_reset(itr);
 		while((rec = list_next(itr))) {
-			if(!strcasecmp(object->name, rec->name)) {
+			if(!strcasecmp(object->name, rec->name)) 
 				break;
-			}
 		}
+
 		//info("%d user %s", update->type, object->name);
 		switch(update->type) {
 		case ACCT_MODIFY_USER:
@@ -691,13 +984,6 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 				object->default_acct = NULL;
 			}
 
-			if(object->qos_list) {
-				if(rec->qos_list)
-					list_destroy(rec->qos_list);
-				rec->qos_list = object->qos_list;
-				object->qos_list = NULL;
-			}
-
 			if(object->admin_level != ACCT_ADMIN_NOTSET) 
 				rec->admin_level = object->admin_level;
 
@@ -715,6 +1001,7 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 			} else
 				object->uid = pw_uid;
 			list_append(local_user_list, object);
+			object = NULL;
 			break;
 		case ACCT_REMOVE_USER:
 			if(!rec) {
@@ -744,9 +1031,8 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 		default:
 			break;
 		}
-		if(update->type != ACCT_ADD_USER) {
-			destroy_acct_user_rec(object);			
-		}
+		
+		destroy_acct_user_rec(object);			
 	}
 	list_iterator_destroy(itr);
 	slurm_mutex_unlock(&local_user_lock);
@@ -756,10 +1042,14 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 
 extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
 {
-	acct_qos_rec_t * rec = NULL;
-	acct_qos_rec_t * object = NULL;
-		
-	ListIterator itr = NULL;
+	acct_qos_rec_t *rec = NULL;
+	acct_qos_rec_t *object = NULL;
+
+	char *qos_char = NULL, *tmp_char = NULL;
+
+	ListIterator itr = NULL, assoc_itr = NULL, qos_itr = NULL;
+
+	acct_association_rec_t *assoc = NULL;
 	int rc = SLURM_SUCCESS;
 
 	if(!local_qos_list)
@@ -774,6 +1064,7 @@ extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
 				break;
 			}
 		}
+
 		//info("%d qos %s", update->type, object->name);
 		switch(update->type) {
 		case ACCT_ADD_QOS:
@@ -782,8 +1073,36 @@ extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
 				break;
 			}
 			list_append(local_qos_list, object);
+			object = NULL;			
+			break;
+		case ACCT_MODIFY_QOS:
+			/* FIXME: handle modified QOS records here */
 			break;
 		case ACCT_REMOVE_QOS:
+			/* Remove this qos from all the associations
+			   on this cluster.
+			*/
+			tmp_char = xstrdup_printf("%d", object->id);
+			slurm_mutex_lock(&local_association_lock);
+			assoc_itr = list_iterator_create(
+				local_association_list);
+			while((assoc = list_next(assoc_itr))) {
+				if(!assoc->qos_list
+				   || !list_count(assoc->qos_list))
+					continue;
+				qos_itr = list_iterator_create(assoc->qos_list);
+				while((qos_char = list_next(qos_itr))) {
+					if(!strcmp(qos_char, tmp_char)) {
+						list_delete_item(qos_itr);
+						break;
+					}
+				}
+				list_iterator_destroy(qos_itr);
+			}
+			list_iterator_destroy(assoc_itr);
+			slurm_mutex_unlock(&local_association_lock);
+			xfree(tmp_char);
+
 			if(!rec) {
 				//rc = SLURM_ERROR;
 				break;
@@ -793,9 +1112,7 @@ extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
 		default:
 			break;
 		}
-		if(update->type != ACCT_ADD_QOS) {
-			destroy_acct_qos_rec(object);			
-		}
+		destroy_acct_qos_rec(object);			
 	}
 	list_iterator_destroy(itr);
 	slurm_mutex_unlock(&local_qos_lock);
@@ -845,9 +1162,272 @@ extern void assoc_mgr_clear_used_info(void)
 	itr = list_iterator_create(local_association_list);
 	while((found_assoc = list_next(itr))) {
 		found_assoc->used_jobs  = 0;
-		found_assoc->used_share = 0;
+		found_assoc->used_shares = 0;
 	}
 	list_iterator_destroy(itr);
 	slurm_mutex_unlock(&local_association_lock);
 }
 
+extern int dump_assoc_mgr_state(char *state_save_location) 
+{
+	static int high_buffer_size = (1024 * 1024);
+	int error_code = 0, log_fd;
+	char *old_file = NULL, *new_file = NULL, *reg_file = NULL;
+	dbd_list_msg_t msg;
+	Buf buffer = init_buf(high_buffer_size);
+	DEF_TIMERS;
+
+	START_TIMER;
+	/* write header: version, time */
+	pack16(SLURMDBD_VERSION, buffer);
+	pack_time(time(NULL), buffer);
+
+	if(local_association_list) {
+		memset(&msg, 0, sizeof(dbd_list_msg_t));
+		slurm_mutex_lock(&local_association_lock);
+		msg.my_list = local_association_list;
+		/* let us know what to unpack */
+		pack16(DBD_ADD_ASSOCS, buffer);
+		slurmdbd_pack_list_msg(SLURMDBD_VERSION, 
+				       DBD_ADD_ASSOCS, &msg, buffer);
+		slurm_mutex_unlock(&local_association_lock);
+	}
+	
+	if(local_user_list) {
+		memset(&msg, 0, sizeof(dbd_list_msg_t));
+		slurm_mutex_lock(&local_user_lock);
+		msg.my_list = local_user_list;
+		/* let us know what to unpack */
+		pack16(DBD_ADD_USERS, buffer);
+		slurmdbd_pack_list_msg(SLURMDBD_VERSION, 
+				       DBD_ADD_USERS, &msg, buffer);
+		slurm_mutex_unlock(&local_user_lock);
+	}
+
+	if(local_qos_list) {		
+		memset(&msg, 0, sizeof(dbd_list_msg_t));
+		slurm_mutex_lock(&local_qos_lock);
+		msg.my_list = local_qos_list;
+		/* let us know what to unpack */
+		pack16(DBD_ADD_QOS, buffer);
+		slurmdbd_pack_list_msg(SLURMDBD_VERSION, 
+				       DBD_ADD_QOS, &msg, buffer);	
+		slurm_mutex_unlock(&local_qos_lock);
+	}
+
+	/* write the buffer to file */
+	old_file = xstrdup(state_save_location);
+	xstrcat(old_file, "/assoc_mgr_state.old");
+	reg_file = xstrdup(state_save_location);
+	xstrcat(reg_file, "/assoc_mgr_state");
+	new_file = xstrdup(state_save_location);
+	xstrcat(new_file, "/assoc_mgr_state.new");
+	
+	slurm_mutex_lock(&local_file_lock);
+	log_fd = creat(new_file, 0600);
+	if (log_fd < 0) {
+		error("Can't save state, create file %s error %m",
+		      new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite = get_buf_offset(buffer), amount;
+		char *data = (char *)get_buf_data(buffer);
+		high_buffer_size = MAX(nwrite, high_buffer_size);
+		while (nwrite > 0) {
+			amount = write(log_fd, &data[pos], nwrite);
+			if ((amount < 0) && (errno != EINTR)) {
+				error("Error writing file %s, %m", new_file);
+				error_code = errno;
+				break;
+			}
+			nwrite -= amount;
+			pos    += amount;
+		}
+		fsync(log_fd);
+		close(log_fd);
+	}
+	if (error_code)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
+		(void) unlink(old_file);
+		(void) link(reg_file, old_file);
+		(void) unlink(reg_file);
+		(void) link(new_file, reg_file);
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(reg_file);
+	xfree(new_file);
+	slurm_mutex_unlock(&local_file_lock);
+	
+	free_buf(buffer);
+	END_TIMER2("dump_assoc_mgr_state");
+	return error_code;
+
+}
+
+extern int load_assoc_mgr_state(char *state_save_location)
+{
+	int data_allocated, data_read = 0, error_code = SLURM_SUCCESS;
+	uint32_t data_size = 0;
+	uint16_t type = 0;
+	uint16_t ver = 0;
+	int state_fd;
+	char *data = NULL, *state_file;
+	Buf buffer;
+	time_t buf_time;
+	dbd_list_msg_t *msg = NULL;
+	
+	/* read the file */
+	state_file = xstrdup(state_save_location);
+	xstrcat(state_file, "/assoc_mgr_state");
+	//info("looking at the %s file", state_file);
+	slurm_mutex_lock(&local_file_lock);
+	state_fd = open(state_file, O_RDONLY);
+	if (state_fd < 0) {
+		info("No assoc_mgr state file (%s) to recover", state_file);
+		error_code = ENOENT;
+	} else {
+		data_allocated = BUF_SIZE;
+		data = xmalloc(data_allocated);
+		while (1) {
+			data_read = read(state_fd, &data[data_size],
+					 BUF_SIZE);
+			if (data_read < 0) {
+				if (errno == EINTR)
+					continue;
+				else {
+					error("Read error on %s: %m", 
+					      state_file);
+					break;
+				}
+			} else if (data_read == 0)	/* eof */
+				break;
+			data_size      += data_read;
+			data_allocated += data_read;
+			xrealloc(data, data_allocated);
+		}
+		close(state_fd);
+	}
+	xfree(state_file);
+	slurm_mutex_unlock(&local_file_lock);
+
+	buffer = create_buf(data, data_size);
+
+	safe_unpack16(&ver, buffer);
+	debug3("Version in assoc_mgr_state header is %u", ver);
+	if (ver > SLURMDBD_VERSION || ver < SLURMDBD_VERSION_MIN) {
+		error("***********************************************");
+		error("Cannot recover assoc_mgr state, incompatible version, "
+		      "got %u, need >= %u and <= %u",
+		      ver, SLURMDBD_VERSION_MIN, SLURMDBD_VERSION);
+		error("***********************************************");
+		free_buf(buffer);
+		return EFAULT;
+	}
+
+	safe_unpack_time(&buf_time, buffer);
+	while (remaining_buf(buffer) > 0) {
+		safe_unpack16(&type, buffer);
+		switch(type) {
+		case DBD_ADD_ASSOCS:
+			error_code = slurmdbd_unpack_list_msg(
+				SLURMDBD_VERSION, DBD_ADD_ASSOCS, &msg, buffer);
+			if (error_code != SLURM_SUCCESS)
+				goto unpack_error;
+			else if(!msg->my_list) {
+				error("No associations retrieved");
+				break;
+			}
+			slurm_mutex_lock(&local_association_lock);
+			local_association_list = msg->my_list;
+			_post_association_list(local_association_list);
+			debug("Recovered %u associations", 
+			      list_count(local_association_list));
+			slurm_mutex_unlock(&local_association_lock);
+			msg->my_list = NULL;
+			slurmdbd_free_list_msg(SLURMDBD_VERSION, msg);
+			break;
+		case DBD_ADD_USERS:
+			error_code = slurmdbd_unpack_list_msg(
+				SLURMDBD_VERSION, DBD_ADD_USERS, &msg, buffer);
+			if (error_code != SLURM_SUCCESS)
+				goto unpack_error;
+			else if(!msg->my_list) {
+				error("No users retrieved");
+				break;
+			}
+			slurm_mutex_lock(&local_user_lock);
+			local_user_list = msg->my_list;
+			_post_user_list(local_user_list);
+			debug("Recovered %u users", 
+			      list_count(local_user_list));
+			slurm_mutex_unlock(&local_user_lock);
+			msg->my_list = NULL;
+			slurmdbd_free_list_msg(SLURMDBD_VERSION, msg);
+			break;
+		case DBD_ADD_QOS:
+			error_code = slurmdbd_unpack_list_msg(
+				SLURMDBD_VERSION, DBD_ADD_QOS, &msg, buffer);
+			if (error_code != SLURM_SUCCESS)
+				goto unpack_error;
+			else if(!msg->my_list) {
+				error("No qos retrieved");
+				break;
+			}
+			slurm_mutex_lock(&local_qos_lock);
+			local_qos_list = msg->my_list;
+			debug("Recovered %u qos", 
+			      list_count(local_qos_list));
+			slurm_mutex_unlock(&local_qos_lock);
+			msg->my_list = NULL;
+			slurmdbd_free_list_msg(SLURMDBD_VERSION, msg);	
+			break;
+		default:
+			error("unknown type %u given", type);
+			goto unpack_error;
+			break;
+		}
+	}
+	running_cache = 1;
+	free_buf(buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	if(buffer)
+		free_buf(buffer);
+	return SLURM_ERROR;
+} 
+
+extern int assoc_mgr_refresh_lists(void *db_conn, assoc_init_args_t *args)
+{
+	static uint16_t enforce = 0;
+	static uint16_t cache_level = ASSOC_MGR_CACHE_ALL;
+
+	if(args) {
+		enforce = args->enforce;
+		cache_level = args->cache_level;
+	}
+	
+	if(!running_cache) { 
+		debug4("No need to run assoc_mgr_refresh_lists when not "
+		       "running from cache; things are already synced.");
+		return SLURM_SUCCESS;
+	}
+
+	if(cache_level & ASSOC_MGR_CACHE_ASSOC) 
+		if(_refresh_local_association_list(db_conn, enforce)
+		   == SLURM_ERROR)
+			return SLURM_ERROR;
+
+	if(cache_level & ASSOC_MGR_CACHE_QOS)
+		if(_refresh_local_qos_list(db_conn, enforce) == SLURM_ERROR)
+			return SLURM_ERROR;
+
+	if(cache_level & ASSOC_MGR_CACHE_USER)
+		if(_refresh_local_user_list(db_conn, enforce) == SLURM_ERROR)
+			return SLURM_ERROR;
+
+	running_cache = 0;
+	
+	return SLURM_SUCCESS;
+}
+
diff --git a/src/common/assoc_mgr.h b/src/common/assoc_mgr.h
index df9b26f9b..57ec629e0 100644
--- a/src/common/assoc_mgr.h
+++ b/src/common/assoc_mgr.h
@@ -45,13 +45,20 @@
 
 #include "src/common/list.h"
 #include "src/common/slurm_accounting_storage.h"
+#include "src/common/slurmdbd_defs.h"
 #include "src/slurmctld/slurmctld.h"
 #include <slurm/slurm.h>
 #include <slurm/slurm_errno.h>
 
+#define ASSOC_MGR_CACHE_ASSOC 0x0001
+#define ASSOC_MGR_CACHE_QOS 0x0002
+#define ASSOC_MGR_CACHE_USER 0x0004
+#define ASSOC_MGR_CACHE_ALL 0xffff
+
 typedef struct {
-	int enforce;
-	void (*remove_assoc_notify) (acct_association_rec_t *rec);
+	uint16_t cache_level;
+	uint16_t enforce;
+	void (*remove_assoc_notify) (acct_association_rec_t *rec);
 } assoc_init_args_t;
 
 /* 
@@ -98,7 +105,7 @@ extern int assoc_mgr_is_user_acct_coord(void *db_conn, uint32_t uid,
 					char *acct);
 
 extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args);
-extern int assoc_mgr_fini(void);
+extern int assoc_mgr_fini(char *state_save_location);
 
 /* 
  * update associations in local cache 
@@ -138,4 +145,23 @@ extern int assoc_mgr_validate_assoc_id(void *db_conn,
  */
 extern void assoc_mgr_clear_used_info(void);
 
+
+/*
+ * Dump the state information of the association mgr just in case the
+ * database isn't up next time we run.
+ */
+extern int dump_assoc_mgr_state(char *state_save_location);
+
+/*
+ * Read in the information of the association mgr if the database
+ * isn't up when starting.
+ */
+extern int load_assoc_mgr_state(char *state_save_location);
+
+/*
+ * Refresh the lists.  If running_cache is set, this will load new
+ * information from the database (if any) and update the cached lists.
+ */
+extern int assoc_mgr_refresh_lists(void *db_conn, assoc_init_args_t *args);
+
 #endif /* _SLURM_ASSOC_MGR_H */
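[Hedged sketch, not part of the patch] Taken together, the declarations above let a daemon keep serving cached accounting data while slurmdbd is unreachable: dump on shutdown, load at a cold start without a database, refresh once a connection exists. A sketch of that call sequence, using only functions from this header; the state directory and helper name are assumptions, not the actual slurmctld code path:

#include <string.h>
#include "src/common/assoc_mgr.h"

/* Illustrative lifecycle only. */
static void _assoc_cache_lifecycle(void *db_conn, char *state_dir)
{
	assoc_init_args_t args;

	memset(&args, 0, sizeof(assoc_init_args_t));
	args.cache_level = ASSOC_MGR_CACHE_ALL;

	/* if the lists can't be fetched at startup, fall back to the last
	 * dumped state; running_cache then flags that a resync is needed */
	if (assoc_mgr_init(db_conn, &args) != SLURM_SUCCESS)
		load_assoc_mgr_state(state_dir);

	/* once a database connection is (re)established */
	assoc_mgr_refresh_lists(db_conn, &args);

	/* at shutdown, dump the cache so the next start can recover it */
	assoc_mgr_fini(state_dir);
}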
diff --git a/src/common/env.c b/src/common/env.c
index 008531e22..44e9216c2 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -599,12 +599,14 @@ int setup_env(env_t *env)
 	}
 	
 	if (env->comm_port
-	    && setenvf (&env->env, "SLURM_SRUN_COMM_PORT", "%u", env->comm_port)) {
+	    && setenvf (&env->env, "SLURM_SRUN_COMM_PORT", "%u", 
+			env->comm_port)) {
 		error ("Can't set SLURM_SRUN_COMM_PORT env variable");
 		rc = SLURM_FAILURE;
 	}
 	if (env->comm_hostname
-	    && setenvf (&env->env, "SLURM_SRUN_COMM_HOST", "%s", env->comm_hostname)) {
+	    && setenvf (&env->env, "SLURM_SRUN_COMM_HOST", "%s", 
+			env->comm_hostname)) {
 		error ("Can't set SLURM_SRUN_COMM_HOST env variable");
 		rc = SLURM_FAILURE;
 	}
@@ -773,12 +775,11 @@ extern char *uint32_compressed_to_str(uint32_t array_len,
  *	LOADLBATCH (AIX only)
  *	MPIRUN_PARTITION, MPIRUN_NOFREE, and MPIRUN_NOALLOCATE (BGL only)
  *
- * Sets OBSOLETE variables:
+ * Sets OBSOLETE variables (needed for MPI, do not remove):
  *	SLURM_JOBID
  *	SLURM_NNODES
  *	SLURM_NODELIST
  *	SLURM_TASKS_PER_NODE <- poorly named, really CPUs per node
- *	? probably only needed for users...
  */
 void
 env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc)
@@ -810,7 +811,7 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc)
 		env_array_overwrite_fmt(dest, "MPIRUN_NOALLOCATE", "%d", 1);
 	}
 
-	/* obsolete */
+	/* OBSOLETE, but needed by MPI, do not remove */
 	env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", alloc->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", alloc->node_cnt);
 	env_array_overwrite_fmt(dest, "SLURM_NODELIST", "%s", alloc->node_list);
@@ -835,12 +836,11 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc)
  *	HOSTNAME
  *	LOADLBATCH (AIX only)
  *
- * Sets OBSOLETE variables:
+ * Sets OBSOLETE variables (needed for MPI, do not remove):
  *	SLURM_JOBID
  *	SLURM_NNODES
  *	SLURM_NODELIST
  *	SLURM_TASKS_PER_NODE <- poorly named, really CPUs per node
- *	? probably only needed for users...
  */
 extern void
 env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
@@ -871,7 +871,7 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	env_array_overwrite(dest, "LOADLBATCH", "yes");
 #endif
 
-	/* OBSOLETE */
+	/* OBSOLETE, but needed by MPI, do not remove */
 	env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", batch->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", num_nodes);
 	env_array_overwrite_fmt(dest, "SLURM_NODELIST", "%s", batch->nodes);
@@ -920,24 +920,24 @@ env_array_for_step(char ***dest,
 	env_array_overwrite_fmt(dest, "SLURM_STEP_NODELIST",
 				"%s", step->step_layout->node_list);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_NUM_NODES",
-			 "%hu", step->step_layout->node_cnt);
+				"%hu", step->step_layout->node_cnt);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_NUM_TASKS",
-			 "%u", step->step_layout->task_cnt);
+				"%u", step->step_layout->task_cnt);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_TASKS_PER_NODE", "%s", tmp);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_LAUNCHER_HOSTNAME",
-			 "%s", launcher_hostname);
+				"%s", launcher_hostname);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_LAUNCHER_PORT",
-			 "%hu", launcher_port);
+				"%hu", launcher_port);
 
-	/* OBSOLETE */
+	/* OBSOLETE, but needed by MPI, do not remove */
 	env_array_overwrite_fmt(dest, "SLURM_STEPID", "%u", step->job_step_id);
 	env_array_overwrite_fmt(dest, "SLURM_NNODES",
-			 "%hu", step->step_layout->node_cnt);
+				"%hu", step->step_layout->node_cnt);
 	env_array_overwrite_fmt(dest, "SLURM_NPROCS",
-			 "%u", step->step_layout->task_cnt);
+				"%u", step->step_layout->task_cnt);
 	env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", tmp);
 	env_array_overwrite_fmt(dest, "SLURM_SRUN_COMM_PORT",
-			 "%hu", launcher_port);
+				"%hu", launcher_port);
 
 	xfree(tmp);
 }
@@ -1405,6 +1405,7 @@ char **env_array_user_default(const char *username, int timeout, int mode)
 		return NULL;
 	}
 	if (child == 0) {
+		setenv("ENVIRONMENT", "BATCH", 1);
 		setpgid(0, 0);
 		close(0);
 		open("/dev/null", O_RDONLY);
diff --git a/src/common/jobacct_common.c b/src/common/jobacct_common.c
index 0529d9db4..14226e7ab 100644
--- a/src/common/jobacct_common.c
+++ b/src/common/jobacct_common.c
@@ -202,7 +202,7 @@ extern void destroy_jobacct_selected_step(void *object)
 }
 
  
-extern void pack_jobacct_job_rec(void *object, Buf buffer)
+extern void pack_jobacct_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 {
 	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
 	ListIterator itr = NULL;
@@ -231,14 +231,14 @@ extern void pack_jobacct_job_rec(void *object, Buf buffer)
 	_pack_sacct(&job->sacct, buffer);
 	pack32(job->show_full, buffer);
 	pack_time(job->start, buffer);
-	pack16(job->state, buffer);
+	pack16((uint16_t)job->state, buffer);
 	if(job->steps)
 		count = list_count(job->steps);
 	pack32(count, buffer);
 	if(count) {
 		itr = list_iterator_create(job->steps);
 		while((step = list_next(itr))) {
-			pack_jobacct_step_rec(step, buffer);
+			pack_jobacct_step_rec(step, rpc_version, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
@@ -255,13 +255,14 @@ extern void pack_jobacct_job_rec(void *object, Buf buffer)
 	pack32(job->user_cpu_usec, buffer);
 }
 
-extern int unpack_jobacct_job_rec(void **job, Buf buffer)
+extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 {
 	jobacct_job_rec_t *job_ptr = xmalloc(sizeof(jobacct_job_rec_t));
 	int i = 0;
 	jobacct_step_rec_t *step = NULL;
 	uint32_t count = 0;
 	uint32_t uint32_tmp;
+	uint16_t uint16_tmp;
 
 	*job = job_ptr;
 
@@ -273,26 +274,29 @@ extern int unpack_jobacct_job_rec(void **job, Buf buffer)
 	safe_unpack32(&job_ptr->elapsed, buffer);
 	safe_unpack_time(&job_ptr->eligible, buffer);
 	safe_unpack_time(&job_ptr->end, buffer);
-	safe_unpack32((uint32_t *)&job_ptr->exitcode, buffer);
+	safe_unpack32(&uint32_tmp, buffer);
+	job_ptr->exitcode = (int32_t)uint32_tmp;
 	safe_unpack32(&job_ptr->gid, buffer);
 	safe_unpack32(&job_ptr->jobid, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->jobname, &uint32_tmp, buffer);
 	safe_unpack32(&job_ptr->lft, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->partition, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job_ptr->nodes, &uint32_tmp, buffer);
-	safe_unpack32((uint32_t *)&job_ptr->priority, buffer);
+	safe_unpack32(&uint32_tmp, buffer);
+	job_ptr->priority = (int32_t)uint32_tmp;
 	safe_unpack16(&job_ptr->qos, buffer);
 	safe_unpack32(&job_ptr->req_cpus, buffer);
 	safe_unpack32(&job_ptr->requid, buffer);
 	_pack_sacct(&job_ptr->sacct, buffer);
 	safe_unpack32(&job_ptr->show_full, buffer);
 	safe_unpack_time(&job_ptr->start, buffer);
-	safe_unpack16((uint16_t *)&job_ptr->state, buffer);
+	safe_unpack16(&uint16_tmp, buffer);
+	job_ptr->state = uint16_tmp;
 	safe_unpack32(&count, buffer);
 
 	job_ptr->steps = list_create(destroy_jobacct_step_rec);
 	for(i=0; i<count; i++) {
-		unpack_jobacct_step_rec(&step, buffer);
+		unpack_jobacct_step_rec(&step, rpc_version, buffer);
 		if(step)
 			list_append(job_ptr->steps, step);
 	}
@@ -317,7 +321,8 @@ unpack_error:
 	return SLURM_ERROR;
 }
  
-extern void pack_jobacct_step_rec(jobacct_step_rec_t *step, Buf buffer)
+extern void pack_jobacct_step_rec(jobacct_step_rec_t *step, 
+				  uint16_t rpc_version, Buf buffer)
 {
 	pack32(step->elapsed, buffer);
 	pack_time(step->end, buffer);
@@ -340,23 +345,27 @@ extern void pack_jobacct_step_rec(jobacct_step_rec_t *step, Buf buffer)
 	pack32(step->user_cpu_usec, buffer);
 }
 
-extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step, Buf buffer)
+extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step, 
+				   uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
+	uint16_t uint16_tmp;
 	jobacct_step_rec_t *step_ptr = xmalloc(sizeof(jobacct_step_rec_t));
 
 	*step = step_ptr;
 
 	safe_unpack32(&step_ptr->elapsed, buffer);
 	safe_unpack_time(&step_ptr->end, buffer);
-	safe_unpack32((uint32_t *)&step_ptr->exitcode, buffer);
+	safe_unpack32(&uint32_tmp, buffer);
+	step_ptr->exitcode = (int32_t)uint32_tmp;
 	safe_unpack32(&step_ptr->jobid, buffer);
 	safe_unpack32(&step_ptr->ncpus, buffer);
         safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
 	safe_unpack32(&step_ptr->requid, buffer);
 	_unpack_sacct(&step_ptr->sacct, buffer);
 	safe_unpack_time(&step_ptr->start, buffer);
-	safe_unpack16((uint16_t *)&step_ptr->state, buffer);
+	safe_unpack16(&uint16_tmp, buffer);
+	step_ptr->state = uint16_tmp;
 	safe_unpack32(&step_ptr->stepid, buffer);	/* job's step number */
 	safe_unpackstr_xmalloc(&step_ptr->stepname, &uint32_tmp, buffer);
 	safe_unpack32(&step_ptr->suspended, buffer);
@@ -376,14 +385,14 @@ unpack_error:
 } 
 
 extern void pack_jobacct_selected_step(jobacct_selected_step_t *step,
-				       Buf buffer)
+				       uint16_t rpc_version, Buf buffer)
 {
 	pack32(step->jobid, buffer);
 	pack32(step->stepid, buffer);
 }
 
 extern int unpack_jobacct_selected_step(jobacct_selected_step_t **step,
-					Buf buffer)
+					uint16_t rpc_version, Buf buffer)
 {
 	jobacct_selected_step_t *step_ptr =
 		xmalloc(sizeof(jobacct_selected_step_t));
diff --git a/src/common/jobacct_common.h b/src/common/jobacct_common.h
index af391dd23..754bb5969 100644
--- a/src/common/jobacct_common.h
+++ b/src/common/jobacct_common.h
@@ -162,6 +162,13 @@ typedef struct {
 			   * client.  This should not be freed, packed
 			   * or unpacked
 			   */
+	uint32_t associd;
+	char    *cluster; /* This is a pointer to the cluster var inside
+			   * the jobacct_job_rec_t that contains this
+			   * step.  It is to be used only in the
+			   * client.  This should not be freed, packed
+			   * or unpacked
+			   */
 	uint32_t elapsed;
 	time_t end;
 	int32_t exitcode;
@@ -225,16 +232,20 @@ extern void destroy_jobacct_job_rec(void *object);
 extern void destroy_jobacct_step_rec(void *object);
 extern void destroy_jobacct_selected_step(void *object);
 
-extern void pack_jobacct_job_rec(void *object, Buf buffer);
-extern int unpack_jobacct_job_rec(void **object, Buf buffer);
+extern void pack_jobacct_job_rec(void *object,
+				 uint16_t rpc_version, Buf buffer);
+extern int unpack_jobacct_job_rec(void **object, uint16_t rpc_version, 
+				  Buf buffer);
  
-extern void pack_jobacct_step_rec(jobacct_step_rec_t *step, Buf buffer);
-extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step, Buf buffer);
+extern void pack_jobacct_step_rec(jobacct_step_rec_t *step, 
+				  uint16_t rpc_version, Buf buffer);
+extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step, 
+				   uint16_t rpc_version, Buf buffer);
 
 extern void pack_jobacct_selected_step(jobacct_selected_step_t *step,
-				       Buf buffer);
+				       uint16_t rpc_version, Buf buffer);
 extern int unpack_jobacct_selected_step(jobacct_selected_step_t **step,
-					Buf buffer);
+					uint16_t rpc_version, Buf buffer);
 
 /* These should only be called from the jobacct-gather plugin */
 extern int jobacct_common_init_struct(struct jobacctinfo *jobacct, 
diff --git a/src/common/log.c b/src/common/log.c
index 7a6acc7e0..5c0f0719d 100644
--- a/src/common/log.c
+++ b/src/common/log.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  log.c - slurm logging facilities
- *  $Id: log.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: log.c 15367 2008-10-09 20:51:36Z da $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -362,7 +362,7 @@ static char *vxstrfmt(const char *fmt, va_list ap)
 	size_t      len = (size_t) 0;
 	char        tmp[LINEBUFSIZE];
 	int         unprocessed = 0;
-
+	int         long_long = 0;
 
 	while (*fmt != '\0') {
 
@@ -436,24 +436,76 @@ static char *vxstrfmt(const char *fmt, va_list ap)
 					xstrcat(buf, "%u");
 				break;
 			case 'l':
+				if((unprocessed == 0) && (*(p+1) == 'l')) {
+					long_long = 1;
+					p++;
+				}
+				
 				if ((unprocessed == 0) && (*(p+1) == 'u')) {
-					snprintf(tmp, sizeof(tmp), "%lu",
-						va_arg(ap, long unsigned));
+					if(long_long) {
+						snprintf(tmp, sizeof(tmp),
+							"%llu", 
+							 va_arg(ap,
+								long long unsigned));
+						long_long = 0;
+					} else 
+						snprintf(tmp, sizeof(tmp),
+							 "%lu",
+							 va_arg(ap,
+								long unsigned));
 					xstrcat(buf, tmp);
 					p++;
 				} else if ((unprocessed==0) && (*(p+1)=='d')) {
-					snprintf(tmp, sizeof(tmp), "%ld",
-						va_arg(ap, long int));
+					if(long_long) {
+						snprintf(tmp, sizeof(tmp),
+							"%lld", 
+							 va_arg(ap,
+								long long int));
+						long_long = 0;
+					} else
+						snprintf(tmp, sizeof(tmp),
+							 "%ld",
+							 va_arg(ap, long int));
+					xstrcat(buf, tmp);
+					p++;
+				} else if ((unprocessed==0) && (*(p+1)=='f')) {
+					if(long_long) {
+						xstrcat(buf, "%llf");
+						long_long = 0;
+					} else {
+						snprintf(tmp, sizeof(tmp),
+							 "%lf",
+							 va_arg(ap, double));
+						xstrcat(buf, tmp);
+					}
+					p++;
 				} else if ((unprocessed==0) && (*(p+1)=='x')) {
-					snprintf(tmp, sizeof(tmp), "%lx",
-						va_arg(ap, long int));
+					if(long_long) {
+						snprintf(tmp, sizeof(tmp),
+							 "%llx", 
+							 va_arg(ap,
+								long long int));
+						long_long = 0;
+					} else
+						snprintf(tmp, sizeof(tmp),
+							 "%lx",
+							 va_arg(ap, long int));
 					xstrcat(buf, tmp);
 					p++;
+				} else if(long_long) {
+					xstrcat(buf, "%ll");
+					long_long = 0;
 				} else
 					xstrcat(buf, "%l");
 				break; 
+			case 'L':
+				if ((unprocessed==0) && (*(p+1)=='f')) {
+					snprintf(tmp, sizeof(tmp), "%Lf", 
+						 va_arg(ap, long double));
+					xstrcat(buf, tmp);
+					p++;
+				} else
+					xstrcat(buf, "%L");
+				break;
 			default:	/* try to handle the rest  */
 				xstrcatchar(buf, '%');
 				xstrcatchar(buf, *p);
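[Hedged example, not part of the patch] With the "ll" and "L" cases added above, vxstrfmt() now expands %llu, %lld, %llx and %Lf itself rather than copying them through literally, so the log wrappers can print 64-bit accounting counters directly. The function name and value below are illustrative:

#include <stdint.h>
#include "src/common/log.h"

/* Illustrative only: log a 64-bit counter through the slurm log API. */
static void _log_cpu_mins(uint64_t grp_cpu_mins)
{
	info("GrpCPUMins set to %llu", (long long unsigned) grp_cpu_mins);
}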
diff --git a/src/common/node_select.c b/src/common/node_select.c
index 71c67fccf..314643532 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -9,7 +9,7 @@
  *  the plugin. This is because functions required by the plugin can not be 
  *  resolved on the front-end nodes, so we can't load the plugins there.
  *
- *  $Id: node_select.c 14208 2008-06-06 19:15:24Z da $
+ *  $Id: node_select.c 15345 2008-10-07 23:21:00Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -717,7 +717,8 @@ static int _unpack_node_info(bg_info_record_t *bg_info_record, Buf buffer)
 	char *bp_inx_str;
 	
 	safe_unpackstr_xmalloc(&(bg_info_record->nodes), &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&(bg_info_record->ionodes), &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&(bg_info_record->ionodes), 
+			       &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&bg_info_record->owner_name,
 			       &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&bg_info_record->bg_block_id,
@@ -1351,6 +1352,175 @@ extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
 	return buf;
 }
 
+/* write select job info to a string
+ * IN jobinfo - a select job credential
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - char * containing string of request
+ */
+extern char *select_g_xstrdup_jobinfo(select_jobinfo_t jobinfo, int mode)
+{
+	uint16_t geometry[SYSTEM_DIMENSIONS];
+	int i;
+	char max_procs_char[8], start_char[32];
+	char *tmp_image = "default";
+	char *buf = NULL;
+		
+	if ((mode != SELECT_PRINT_DATA)
+	    && jobinfo && (jobinfo->magic != JOBINFO_MAGIC)) {
+		error("select_g_xstrdup_jobinfo: jobinfo magic bad");
+		return NULL;
+	}
+
+	if (jobinfo == NULL) {
+		if (mode != SELECT_PRINT_HEAD) {
+			error("select_g_xstrdup_jobinfo: jobinfo bad");
+			return NULL;
+		}
+	} else if (jobinfo->geometry[0] == (uint16_t) NO_VAL) {
+		for (i=0; i<SYSTEM_DIMENSIONS; i++)
+			geometry[i] = 0;
+	} else {
+		for (i=0; i<SYSTEM_DIMENSIONS; i++)
+			geometry[i] = jobinfo->geometry[i];
+	}
+
+	switch (mode) {
+	case SELECT_PRINT_HEAD:
+		xstrcat(buf, 
+			"CONNECT REBOOT ROTATE MAX_PROCS "
+			"GEOMETRY START BLOCK_ID");
+		break;
+	case SELECT_PRINT_DATA:
+		if (jobinfo->max_procs == NO_VAL)
+			sprintf(max_procs_char, "None");
+		else
+			convert_num_unit((float)jobinfo->max_procs, 
+					 max_procs_char, sizeof(max_procs_char),
+					 UNIT_NONE);
+		if (jobinfo->start[0] == (uint16_t) NO_VAL)
+			sprintf(start_char, "None");
+		else {
+			snprintf(start_char, sizeof(start_char), 
+				"%cx%cx%c",
+				 alpha_num[jobinfo->start[0]],
+				 alpha_num[jobinfo->start[1]],
+				 alpha_num[jobinfo->start[2]]);
+		} 
+		xstrfmtcat(buf, 
+			   "%7.7s %6.6s %6.6s %9s    %cx%cx%c %5s %-16s",
+			   _job_conn_type_string(jobinfo->conn_type),
+			   _yes_no_string(jobinfo->reboot),
+			   _yes_no_string(jobinfo->rotate),
+			   max_procs_char,
+			   alpha_num[geometry[0]],
+			   alpha_num[geometry[1]],
+			   alpha_num[geometry[2]],
+			   start_char, jobinfo->bg_block_id);
+		break;
+	case SELECT_PRINT_MIXED:
+		if (jobinfo->max_procs == NO_VAL)
+			sprintf(max_procs_char, "None");
+		else
+			convert_num_unit((float)jobinfo->max_procs,
+					 max_procs_char, sizeof(max_procs_char),
+					 UNIT_NONE);
+		if (jobinfo->start[0] == (uint16_t) NO_VAL)
+			sprintf(start_char, "None");
+		else {
+			snprintf(start_char, sizeof(start_char),
+				"%cx%cx%c",
+				 alpha_num[jobinfo->start[0]],
+				 alpha_num[jobinfo->start[1]],
+				 alpha_num[jobinfo->start[2]]);
+		}
+		
+		xstrfmtcat(buf, 
+			 "Connection=%s Reboot=%s Rotate=%s MaxProcs=%s "
+			 "Geometry=%cx%cx%c Start=%s Block_ID=%s",
+			 _job_conn_type_string(jobinfo->conn_type),
+			 _yes_no_string(jobinfo->reboot),
+			 _yes_no_string(jobinfo->rotate),
+			 max_procs_char,
+			 alpha_num[geometry[0]],
+			 alpha_num[geometry[1]],
+			 alpha_num[geometry[2]],
+			 start_char, jobinfo->bg_block_id);
+		break;
+	case SELECT_PRINT_BG_ID:
+		xstrfmtcat(buf, "%s", jobinfo->bg_block_id);
+		break;
+	case SELECT_PRINT_NODES:
+		if(jobinfo->ionodes && jobinfo->ionodes[0]) 
+			xstrfmtcat(buf, "%s[%s]",
+				 jobinfo->nodes, jobinfo->ionodes);
+		else
+			xstrfmtcat(buf, "%s", jobinfo->nodes);
+		break;
+	case SELECT_PRINT_CONNECTION:
+		xstrfmtcat(buf, "%s", 
+			 _job_conn_type_string(jobinfo->conn_type));
+		break;
+	case SELECT_PRINT_REBOOT:
+		xstrfmtcat(buf, "%s",
+			 _yes_no_string(jobinfo->reboot));
+		break;
+	case SELECT_PRINT_ROTATE:
+		xstrfmtcat(buf, "%s",
+			 _yes_no_string(jobinfo->rotate));
+		break;
+	case SELECT_PRINT_GEOMETRY:
+		xstrfmtcat(buf, "%cx%cx%c",
+			 alpha_num[geometry[0]],
+			 alpha_num[geometry[1]],
+			 alpha_num[geometry[2]]);
+		break;
+	case SELECT_PRINT_START:
+		if (jobinfo->start[0] == (uint16_t) NO_VAL)
+			xstrcat(buf, "None");
+		else {
+			xstrfmtcat(buf,
+				 "%cx%cx%c",
+				 alpha_num[jobinfo->start[0]],
+				 alpha_num[jobinfo->start[1]],
+				 alpha_num[jobinfo->start[2]]);
+		}
+		break;
+	case SELECT_PRINT_MAX_PROCS:
+		if (jobinfo->max_procs == NO_VAL)
+			sprintf(max_procs_char, "None");
+		else
+			convert_num_unit((float)jobinfo->max_procs,
+					 max_procs_char, sizeof(max_procs_char),
+					 UNIT_NONE);
+		
+		xstrfmtcat(buf, "%s", max_procs_char);
+		break;
+	case SELECT_PRINT_BLRTS_IMAGE:
+		if(jobinfo->blrtsimage)
+			tmp_image = jobinfo->blrtsimage;
+		xstrfmtcat(buf, "%s", tmp_image);		
+		break;
+	case SELECT_PRINT_LINUX_IMAGE:
+		if(jobinfo->linuximage)
+			tmp_image = jobinfo->linuximage;
+		xstrfmtcat(buf, "%s", tmp_image);		
+		break;
+	case SELECT_PRINT_MLOADER_IMAGE:
+		if(jobinfo->mloaderimage)
+			tmp_image = jobinfo->mloaderimage;
+		xstrfmtcat(buf, "%s", tmp_image);		
+		break;
+	case SELECT_PRINT_RAMDISK_IMAGE:
+		if(jobinfo->ramdiskimage)
+			tmp_image = jobinfo->ramdiskimage;
+		xstrfmtcat(buf, "%s", tmp_image);		
+		break;		
+	default:
+		error("select_g_xstrdup_jobinfo: bad mode %d", mode);
+	}
+	
+	return buf;
+}
+
 /* Unpack node select info from a buffer */
 extern int select_g_unpack_node_info(node_select_info_msg_t **
 		node_select_info_msg_pptr, Buf buffer)
@@ -1488,6 +1658,15 @@ extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
 	} else
 		return NULL;
 }
+/* write select job info to a string
+ * IN jobinfo - a select job credential
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - char * containing string of request
+ */
+extern char *select_g_xstrdup_jobinfo(select_jobinfo_t jobinfo, int mode)
+{
+	return NULL;
+}
 
 extern int select_g_unpack_node_info(node_select_info_msg_t **
 		node_select_info_msg_pptr, Buf buffer)
diff --git a/src/common/node_select.h b/src/common/node_select.h
index 46e1defa2..7c0d957ae 100644
--- a/src/common/node_select.h
+++ b/src/common/node_select.h
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  node_select.h - Define node selection plugin functions.
  *
- * $Id: node_select.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: node_select.h 15324 2008-10-07 00:16:53Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -310,9 +310,9 @@ extern int  select_g_pack_jobinfo  (select_jobinfo_t jobinfo, Buf buffer);
  */
 extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer);
 
-/* write select job credential to a string
+/* write select job info to a string
  * IN jobinfo - a select job credential
- * OUT buf    - location to write job credential contents
+ * OUT buf    - location to write job info contents
  * IN size    - byte size of buf
  * IN mode    - print mode, see enum select_print_mode
  * RET        - the string, same as buf
@@ -320,6 +320,14 @@ extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer);
 extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
 				     char *buf, size_t size, int mode);
 
+/* write select job info to a string
+ * IN jobinfo - a select job credential
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - char * containing string of request
+ *              (xmalloc'ed; the caller must xfree it)
+ */
+extern char *select_g_xstrdup_jobinfo(select_jobinfo_t jobinfo, int mode);
+
 /* Prepare to start a job step, allocate memory as needed
  * RET - slurm error code
  */
diff --git a/src/common/parse_time.c b/src/common/parse_time.c
index 33c0e7f42..879c8d1f0 100644
--- a/src/common/parse_time.c
+++ b/src/common/parse_time.c
@@ -254,12 +254,15 @@ static int _get_date(char *time_str, int *pos, int *month, int *mday, int *year)
  *   midnight, noon, teatime (4PM)
  *   HH:MM[:SS] [AM|PM]
  *   MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
+ *   MM/DD[/YY]-HH:MM[:SS]
  *   now + count [minutes | hours | days | weeks]
  * 
  * Invalid input results in message to stderr and return value of zero
  * NOTE: not thread safe
+ * NOTE: by default this will look into the future for the next matching time.
+ * Set the past flag to resolve to a time in the past instead.
  */
-extern time_t parse_time(char *time_str)
+extern time_t parse_time(char *time_str, int past)
 {
 	int    hour = -1, minute = -1, second = 0;
 	int    month = -1, mday = -1, year = -1;
@@ -338,6 +341,7 @@ extern time_t parse_time(char *time_str)
 			second   = later_tm->tm_sec;
 			continue;
 		}
+
 		if ((time_str[pos] < '0') || (time_str[pos] > '9'))	/* invalid */
 			goto prob;
 		/* We have some numeric value to process */
@@ -358,9 +362,11 @@ extern time_t parse_time(char *time_str)
 		hour = 0;
 		minute = 0;
 	}
-	else if ((hour != -1) && (month == -1)) {	/* time, no date implies soonest day */
-		if ((hour >  time_now_tm->tm_hour)
-		||  ((hour == time_now_tm->tm_hour) && (minute > time_now_tm->tm_min))) {
+	else if ((hour != -1) && (month == -1)) {	
+		/* time, no date implies soonest day */
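+		/* with the past flag set, resolve to today even if the
+		 * given time has already passed */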
+		if (past || (hour >  time_now_tm->tm_hour)
+		    ||  ((hour == time_now_tm->tm_hour) 
+			 && (minute > time_now_tm->tm_min))) {
 			/* today */
 			month = time_now_tm->tm_mon;
 			mday  = time_now_tm->tm_mday;
@@ -371,16 +377,19 @@ extern time_t parse_time(char *time_str)
 			month = later_tm->tm_mon;
 			mday  = later_tm->tm_mday;
 			year  = later_tm->tm_year;
-
 		}
 	}
 	if (year == -1) {
-		if ((month  >  time_now_tm->tm_mon)
-		||  ((month == time_now_tm->tm_mon) && (mday >  time_now_tm->tm_mday))
-		||  ((month == time_now_tm->tm_mon) && (mday == time_now_tm->tm_mday)
-		  && (hour >  time_now_tm->tm_hour)) 
-		||  ((month == time_now_tm->tm_mon) && (mday == time_now_tm->tm_mday)
-		  && (hour == time_now_tm->tm_hour) && (minute > time_now_tm->tm_min))) {
+		if (past || (month  >  time_now_tm->tm_mon)
+		    ||  ((month == time_now_tm->tm_mon) 
+			 && (mday >  time_now_tm->tm_mday))
+		    ||  ((month == time_now_tm->tm_mon) 
+			 && (mday == time_now_tm->tm_mday)
+			 && (hour >  time_now_tm->tm_hour)) 
+		    ||  ((month == time_now_tm->tm_mon) 
+			 && (mday == time_now_tm->tm_mday)
+			 && (hour == time_now_tm->tm_hour) 
+			 && (minute > time_now_tm->tm_min))) {
 			/* this year */
 			year = time_now_tm->tm_year;
 		} else {
diff --git a/src/common/parse_time.h b/src/common/parse_time.h
index 0189f81aa..c891bd073 100644
--- a/src/common/parse_time.h
+++ b/src/common/parse_time.h
@@ -64,7 +64,7 @@
  *
  * Invalid input results in message to stderr and return value of zero
  */
-extern time_t parse_time(char *time_str);
+extern time_t parse_time(char *time_str, int past);
 
 /*
  * slurm_make_time_str - convert time_t to string with a format of
diff --git a/src/common/plugstack.c b/src/common/plugstack.c
index 9776df489..be19573d3 100644
--- a/src/common/plugstack.c
+++ b/src/common/plugstack.c
@@ -1,7 +1,8 @@
 /*****************************************************************************\
  *  plugstack.c -- stackable plugin architecture for node job kontrol (SPANK)
  *****************************************************************************
- *  Copyright (C) 2005 The Regents of the University of California.
+ *  Copyright (C) 2005-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  LLNL-CODE-402394.
  *
@@ -224,12 +225,6 @@ _plugin_stack_parse_line(char *line, char **plugin, int *acp, char ***argv,
 	if ((s = strchr(line, '#')))
 		*s = '\0';
 
-	/*
-	 * Remove trailing whitespace
-	 */
-	for (s = line + strlen (line) - 1; isspace (*s) || *s == '\n'; s--)
-		*s = '\0';
-
 	if (!(option = strtok_r(line, separators, &sp)))
 		return (0);
 
diff --git a/src/common/print_fields.c b/src/common/print_fields.c
index e95864f06..707403f5a 100644
--- a/src/common/print_fields.c
+++ b/src/common/print_fields.c
@@ -125,15 +125,16 @@ extern void print_fields_str(print_field_t *field, char *value, int last)
 			print_this = "";
 		else
 			print_this = " ";
-	}
+	} else
+		print_this = value;
 	
 	if(print_fields_parsable_print == PRINT_FIELDS_PARSABLE_NO_ENDING
 	   && last)
-		printf("%s", value);	
+		printf("%s", print_this);	
 	else if(print_fields_parsable_print)
-		printf("%s|", value);
+		printf("%s|", print_this);
 	else {
-		if(!print_this) {
+		if(value) {
 			memcpy(&temp_char, value, field->len);
 			
 			if(strlen(value) > field->len) 
@@ -144,6 +145,30 @@ extern void print_fields_str(print_field_t *field, char *value, int last)
 	}
 }
 
+extern void print_fields_int(print_field_t *field, int value, int last)
+{
+	/* (value == unset)  || (value == cleared) */
+	if((value == NO_VAL) || (value == INFINITE)) {
+		if(print_fields_parsable_print 
+		   == PRINT_FIELDS_PARSABLE_NO_ENDING
+		   && last)
+			;
+		else if(print_fields_parsable_print)
+			printf("|");	
+		else				
+			printf("%*s ", field->len, " ");
+	} else {
+		if(print_fields_parsable_print
+		   == PRINT_FIELDS_PARSABLE_NO_ENDING
+		   && last)
+			printf("%d", value);	
+		else if(print_fields_parsable_print)
+			printf("%d|", value);	
+		else
+			printf("%*d ", field->len, value);
+	}
+}
+
 extern void print_fields_uint32(print_field_t *field, uint32_t value, int last)
 {
 	/* (value == unset)  || (value == cleared) */
diff --git a/src/common/print_fields.h b/src/common/print_fields.h
index 36eed4add..8de6d6194 100644
--- a/src/common/print_fields.h
+++ b/src/common/print_fields.h
@@ -87,6 +87,7 @@ extern void destroy_print_field(void *object);
 extern void print_fields_header(List print_fields_list);
 extern void print_fields_date(print_field_t *field, time_t value, int last);
 extern void print_fields_str(print_field_t *field, char *value, int last);
+extern void print_fields_int(print_field_t *field, int value, int last);
 extern void print_fields_uint32(
 	print_field_t *field, uint32_t value, int last);
 extern void print_fields_uint64(
diff --git a/src/common/read_config.c b/src/common/read_config.c
index d683c7a3f..8a6b6145c 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -200,6 +200,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"ResumeProgram", S_P_STRING},
 	{"ResumeRate", S_P_UINT16},
 	{"ReturnToService", S_P_UINT16},
+	{"SallocDefaultCommand", S_P_STRING},
 	{"SchedulerAuth", S_P_STRING, defunct_option},
 	{"SchedulerParameters", S_P_STRING},
 	{"SchedulerPort", S_P_UINT16},
@@ -809,6 +810,7 @@ static int _register_conf_node_aliases(slurm_conf_node_t *node_ptr)
 		      "in FRONT_END mode");
 		goto cleanup;
 	}
+
 	hostname = node_ptr->hostnames;
 	address = node_ptr->addresses;
 #else
@@ -824,15 +826,20 @@ static int _register_conf_node_aliases(slurm_conf_node_t *node_ptr)
 #endif
 
 	/* now build the individual node structures */
+#ifdef HAVE_FRONT_END
+	/* we always want the first one in the list to be the one
+	 * returned when looking for localhost
+	 */
+	while ((alias = hostlist_pop(alias_list))) {
+#else
 	while ((alias = hostlist_shift(alias_list))) {
-#ifndef HAVE_FRONT_END
 		hostname = hostlist_shift(hostname_list);
 		address = hostlist_shift(address_list);
 #endif
 
 		_push_to_hashtbls(alias, hostname, address, node_ptr->port,
-					node_ptr->cpus, node_ptr->sockets,
-					node_ptr->cores, node_ptr->threads);
+				  node_ptr->cpus, node_ptr->sockets,
+				  node_ptr->cores, node_ptr->threads);
 
 		free(alias);
 #ifndef HAVE_FRONT_END
@@ -1163,6 +1170,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->propagate_rlimits_except);
 	xfree (ctl_conf_ptr->propagate_rlimits);
 	xfree (ctl_conf_ptr->resume_program);
+	xfree (ctl_conf_ptr->salloc_default_command);
 	xfree (ctl_conf_ptr->slurm_conf);
 	xfree (ctl_conf_ptr->sched_params);
 	xfree (ctl_conf_ptr->schedtype);
@@ -1259,6 +1267,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->resume_program);
 	ctl_conf_ptr->resume_rate		= (uint16_t) NO_VAL;
 	ctl_conf_ptr->ret2service		= (uint16_t) NO_VAL;
+	xfree( ctl_conf_ptr->salloc_default_command);
 	xfree( ctl_conf_ptr->sched_params );
 	ctl_conf_ptr->sched_time_slice		= (uint16_t) NO_VAL;
 	xfree( ctl_conf_ptr->schedtype );
@@ -1875,6 +1884,9 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint16(&conf->resume_rate, "ResumeRate", hashtbl))
 		conf->resume_rate = DEFAULT_RESUME_RATE;
 
+	s_p_get_string(&conf->salloc_default_command, "SallocDefaultCommand",
+			hashtbl);
+
 	s_p_get_string(&conf->sched_params, "SchedulerParameters", hashtbl);
 
 	if (s_p_get_uint16(&conf->schedport, "SchedulerPort", hashtbl)) {
diff --git a/src/common/read_config.h b/src/common/read_config.h
index 5c6e52774..6947325a9 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -50,7 +50,13 @@ extern char *default_slurm_config_file;
 extern char *default_plugin_path;
 extern char *default_plugstack;
 
-#define DEFAULT_ACCOUNTING_ENFORCE  0
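+/* enforcement levels: none, require a valid association, or also
+ * enforce that association's limits */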
+enum {
+	ACCOUNTING_ENFORCE_NONE,
+	ACCOUNTING_ENFORCE_YES,
+	ACCOUNTING_ENFORCE_WITH_LIMITS
+};
+
+#define DEFAULT_ACCOUNTING_ENFORCE  ACCOUNTING_ENFORCE_NONE
 #define DEFAULT_ACCOUNTING_STORAGE_TYPE "accounting_storage/none"
 #define DEFAULT_AUTH_TYPE          "auth/none"
 #define DEFAULT_CACHE_GROUPS        0
diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c
index cb10b7b2f..8ce4ae862 100644
--- a/src/common/slurm_accounting_storage.c
+++ b/src/common/slurm_accounting_storage.c
@@ -58,7 +58,8 @@
  */
 
 typedef struct slurm_acct_storage_ops {
-	void *(*get_conn)          (bool make_agent, bool rollback);
+	void *(*get_conn)          (bool make_agent, int conn_num, 
+				    bool rollback);
 	int  (*close_conn)         (void **db_conn);
 	int  (*commit)             (void *db_conn, bool commit);
 	int  (*add_users)          (void *db_conn, uint32_t uid,
@@ -86,6 +87,9 @@ typedef struct slurm_acct_storage_ops {
 	List (*modify_associations)(void *db_conn, uint32_t uid,
 				    acct_association_cond_t *assoc_cond,
 				    acct_association_rec_t *assoc);
+	List (*modify_qos)         (void *db_conn, uint32_t uid,
+				    acct_qos_cond_t *qos_cond,
+				    acct_qos_rec_t *qos);
 	List (*remove_users)       (void *db_conn, uint32_t uid,
 				    acct_user_cond_t *user_cond);
 	List (*remove_coord)       (void *db_conn, uint32_t uid,
@@ -133,7 +137,7 @@ typedef struct slurm_acct_storage_ops {
 				    void *cluster_rec, 
 				    time_t start, time_t end);
 	int  (*register_ctld)      (char *cluster, uint16_t port);
-	int  (*job_start)          (void *db_conn,
+	int  (*job_start)          (void *db_conn, char *cluster_name,
 				    struct job_record *job_ptr);
 	int  (*job_complete)       (void *db_conn,
 				    struct job_record *job_ptr);
@@ -203,6 +207,7 @@ static slurm_acct_storage_ops_t * _acct_storage_get_ops(
 		"acct_storage_p_modify_accounts",
 		"acct_storage_p_modify_clusters",
 		"acct_storage_p_modify_associations",
+		"acct_storage_p_modify_qos",
 		"acct_storage_p_remove_users",
 		"acct_storage_p_remove_coord",
 		"acct_storage_p_remove_accts",
@@ -338,8 +343,6 @@ extern void destroy_acct_user_rec(void *object)
 			list_destroy(acct_user->coord_accts);
 		xfree(acct_user->default_acct);
 		xfree(acct_user->name);
-		if(acct_user->qos_list)
-			list_destroy(acct_user->qos_list);
 		xfree(acct_user);
 	}
 }
@@ -357,8 +360,6 @@ extern void destroy_acct_account_rec(void *object)
 		xfree(acct_account->description);
 		xfree(acct_account->name);
 		xfree(acct_account->organization);
-		if(acct_account->qos_list)
-			list_destroy(acct_account->qos_list);
 		xfree(acct_account);
 	}
 }
@@ -394,6 +395,9 @@ extern void destroy_acct_cluster_rec(void *object)
 			list_destroy(acct_cluster->accounting_list);
 		xfree(acct_cluster->control_host);
 		xfree(acct_cluster->name);
+		if(acct_cluster->valid_qos_list)
+			list_destroy(acct_cluster->valid_qos_list);
+		destroy_acct_association_rec(acct_cluster->root_assoc);
 		xfree(acct_cluster);
 	}
 }
@@ -420,6 +424,8 @@ extern void destroy_acct_association_rec(void *object)
 		xfree(acct_association->cluster);
 		xfree(acct_association->parent_acct);
 		xfree(acct_association->partition);
+		if(acct_association->qos_list)
+			list_destroy(acct_association->qos_list);
 		xfree(acct_association->user);
 		xfree(acct_association);
 	}
@@ -430,7 +436,16 @@ extern void destroy_acct_qos_rec(void *object)
 	acct_qos_rec_t *acct_qos = (acct_qos_rec_t *)object;
 	if(acct_qos) {
 		xfree(acct_qos->description);
+		xfree(acct_qos->job_flags);
+		if(acct_qos->job_list)
+			list_destroy(acct_qos->job_list);
 		xfree(acct_qos->name);
+		if(acct_qos->preemptee_list)
+			list_destroy(acct_qos->preemptee_list);
+		if(acct_qos->preemptor_list)
+			list_destroy(acct_qos->preemptor_list);
+		if(acct_qos->user_limit_list)
+			list_destroy(acct_qos->user_limit_list);
 		xfree(acct_qos);
 	}
 }
@@ -439,8 +454,11 @@ extern void destroy_acct_txn_rec(void *object)
 {
 	acct_txn_rec_t *acct_txn = (acct_txn_rec_t *)object;
 	if(acct_txn) {
+		xfree(acct_txn->accts);
 		xfree(acct_txn->actor_name);
+		xfree(acct_txn->clusters);
 		xfree(acct_txn->set_info);
+		xfree(acct_txn->users);
 		xfree(acct_txn->where_query);
 		xfree(acct_txn);
 	}
@@ -454,8 +472,6 @@ extern void destroy_acct_user_cond(void *object)
 		destroy_acct_association_cond(acct_user->assoc_cond);
 		if(acct_user->def_acct_list)
 			list_destroy(acct_user->def_acct_list);
-		if(acct_user->qos_list)
-			list_destroy(acct_user->qos_list);
 		xfree(acct_user);
 	}
 }
@@ -471,8 +487,6 @@ extern void destroy_acct_account_cond(void *object)
 			list_destroy(acct_account->description_list);
 		if(acct_account->organization_list)
 			list_destroy(acct_account->organization_list);
-		if(acct_account->qos_list)
-			list_destroy(acct_account->qos_list);
 		xfree(acct_account);
 	}
 }
@@ -499,11 +513,47 @@ extern void destroy_acct_association_cond(void *object)
 			list_destroy(acct_association->acct_list);
 		if(acct_association->cluster_list)
 			list_destroy(acct_association->cluster_list);
+
+		if(acct_association->fairshare_list)
+			list_destroy(acct_association->fairshare_list);
+
+		if(acct_association->grp_cpu_mins_list)
+			list_destroy(acct_association->grp_cpu_mins_list);
+		if(acct_association->grp_cpus_list)
+			list_destroy(acct_association->grp_cpus_list);
+		if(acct_association->grp_jobs_list)
+			list_destroy(acct_association->grp_jobs_list);
+		if(acct_association->grp_nodes_list)
+			list_destroy(acct_association->grp_nodes_list);
+		if(acct_association->grp_submit_jobs_list)
+			list_destroy(acct_association->grp_submit_jobs_list);
+		if(acct_association->grp_wall_list)
+			list_destroy(acct_association->grp_wall_list);
+
 		if(acct_association->id_list)
 			list_destroy(acct_association->id_list);
+
+		if(acct_association->max_cpu_mins_pj_list)
+			list_destroy(acct_association->max_cpu_mins_pj_list);
+		if(acct_association->max_cpus_pj_list)
+			list_destroy(acct_association->max_cpus_pj_list);
+		if(acct_association->max_jobs_list)
+			list_destroy(acct_association->max_jobs_list);
+		if(acct_association->max_nodes_pj_list)
+			list_destroy(acct_association->max_nodes_pj_list);
+		if(acct_association->max_submit_jobs_list)
+			list_destroy(acct_association->max_submit_jobs_list);
+		if(acct_association->max_wall_pj_list)
+			list_destroy(acct_association->max_wall_pj_list);
+
 		if(acct_association->partition_list)
 			list_destroy(acct_association->partition_list);
-		xfree(acct_association->parent_acct);
+
+		if(acct_association->parent_acct_list)
+			list_destroy(acct_association->parent_acct_list);
+
+		if(acct_association->qos_list)
+			list_destroy(acct_association->qos_list);
 		if(acct_association->user_list)
 			list_destroy(acct_association->user_list);
 		xfree(acct_association);
@@ -552,12 +602,22 @@ extern void destroy_acct_txn_cond(void *object)
 {
 	acct_txn_cond_t *acct_txn = (acct_txn_cond_t *)object;
 	if(acct_txn) {
+		if(acct_txn->acct_list)
+			list_destroy(acct_txn->acct_list);
 		if(acct_txn->action_list)
 			list_destroy(acct_txn->action_list);
 		if(acct_txn->actor_list)
 			list_destroy(acct_txn->actor_list);
+		if(acct_txn->cluster_list)
+			list_destroy(acct_txn->cluster_list);
 		if(acct_txn->id_list)
 			list_destroy(acct_txn->id_list);
+		if(acct_txn->info_list)
+			list_destroy(acct_txn->info_list);
+		if(acct_txn->name_list)
+			list_destroy(acct_txn->name_list);
+		if(acct_txn->user_list)
+			list_destroy(acct_txn->user_list);
 		xfree(acct_txn);
 	}
 }
@@ -568,130 +628,271 @@ extern void destroy_acct_update_object(void *object)
 		(acct_update_object_t *) object;
 
 	if(acct_update) {
-		if(acct_update->objects)
+		if(acct_update->objects) 
 			list_destroy(acct_update->objects);
+		
 		xfree(acct_update);
 	}
 }
 
+extern void destroy_acct_used_limits(void *object)
+{
+	acct_used_limits_t *acct_used_limits = (acct_used_limits_t *)object;
+
+	if(acct_used_limits) {
+		xfree(acct_used_limits);
+	}
+}
+
 extern void destroy_update_shares_rec(void *object)
 {
 	xfree(object);
 }
 
+extern void destroy_acct_print_tree(void *object)
+{
+	acct_print_tree_t *acct_print_tree = (acct_print_tree_t *)object;
+
+	if(acct_print_tree) {
+		xfree(acct_print_tree->name);
+		xfree(acct_print_tree->print_name);
+		xfree(acct_print_tree->spaces);
+		xfree(acct_print_tree);
+	}
+}
+
+extern void init_acct_association_rec(acct_association_rec_t *assoc)
+{
+	if(!assoc)
+		return;
+
+	memset(assoc, 0, sizeof(acct_association_rec_t));
+
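+	/* NO_VAL marks each limit as unset, so callers can tell
+	 * "not specified" apart from an explicit zero */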
+	assoc->fairshare = NO_VAL;
+
+	assoc->grp_cpu_mins = NO_VAL;
+	assoc->grp_cpus = NO_VAL;
+	assoc->grp_jobs = NO_VAL;
+	assoc->grp_nodes = NO_VAL;
+	assoc->grp_submit_jobs = NO_VAL;
+	assoc->grp_wall = NO_VAL;
+
+	assoc->max_cpu_mins_pj = NO_VAL;
+	assoc->max_cpus_pj = NO_VAL;
+	assoc->max_jobs = NO_VAL;
+	assoc->max_nodes_pj = NO_VAL;
+	assoc->max_submit_jobs = NO_VAL;
+	assoc->max_wall_pj = NO_VAL;
+}
+
+extern void init_acct_qos_rec(acct_qos_rec_t *qos)
+{
+	if(!qos)
+		return;
+
+	memset(qos, 0, sizeof(acct_qos_rec_t));
+
+	qos->priority = NO_VAL;
+
+	qos->grp_cpu_mins = NO_VAL;
+	qos->grp_cpus = NO_VAL;
+	qos->grp_jobs = NO_VAL;
+	qos->grp_nodes = NO_VAL;
+	qos->grp_submit_jobs = NO_VAL;
+	qos->grp_wall = NO_VAL;
+
+	qos->max_cpu_mins_pu = NO_VAL;
+	qos->max_cpus_pu = NO_VAL;
+	qos->max_jobs_pu = NO_VAL;
+	qos->max_nodes_pu = NO_VAL;
+	qos->max_submit_jobs_pu = NO_VAL;
+	qos->max_wall_pu = NO_VAL;
+}
+
 /****************************************************************************\
  * Pack and unpack data structures
 \****************************************************************************/
-extern void pack_acct_user_rec(void *in, Buf buffer)
+extern void pack_acct_user_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	ListIterator itr = NULL;
 	acct_user_rec_t *object = (acct_user_rec_t *)in;
 	uint32_t count = NO_VAL;
 	acct_coord_rec_t *coord = NULL;
 	acct_association_rec_t *assoc = NULL;
-	char *tmp_info = NULL;
 
-	if(!object) {
-		pack16(0, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		packnull(buffer);
-		packnull(buffer);
-		pack32(NO_VAL, buffer);
-		pack32(0, buffer);
-		return;
-	}
+	if(rpc_version < 3) {
+		if(!object) {
+			pack16(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			pack32(0, buffer);
+			return;
+		}
  
-	pack16((uint16_t)object->admin_level, buffer);
-	if(object->assoc_list)
-		count = list_count(object->assoc_list);
+		pack16(object->admin_level, buffer);
+		if(object->assoc_list)
+			count = list_count(object->assoc_list);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->assoc_list);
-		while((assoc = list_next(itr))) {
-			pack_acct_association_rec(assoc, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->assoc_list);
+			while((assoc = list_next(itr))) {
+				pack_acct_association_rec(assoc, rpc_version, 
+							  buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	if(object->coord_accts)
-		count = list_count(object->coord_accts);
+		if(object->coord_accts)
+			count = list_count(object->coord_accts);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->coord_accts);
-		while((coord = list_next(itr))) {
-			pack_acct_coord_rec(coord, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->coord_accts);
+			while((coord = list_next(itr))) {
+				pack_acct_coord_rec(coord, rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	packstr(object->default_acct, buffer);
-	packstr(object->name, buffer);
+		packstr(object->default_acct, buffer);
+		packstr(object->name, buffer);
+
+		pack32(count, buffer);	/* needed for old qos_list */
+
+		pack32(object->uid, buffer);
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			pack16(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+			return;
+		}
+ 
+		pack16(object->admin_level, buffer);
+		if(object->assoc_list)
+			count = list_count(object->assoc_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->assoc_list);
+			while((assoc = list_next(itr))) {
+				pack_acct_association_rec(assoc, rpc_version,
+							  buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	if(object->qos_list)
-		count = list_count(object->qos_list);
+		if(object->coord_accts)
+			count = list_count(object->coord_accts);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->coord_accts);
+			while((coord = list_next(itr))) {
+				pack_acct_coord_rec(coord, rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	pack32(count, buffer);
+		packstr(object->default_acct, buffer);
+		packstr(object->name, buffer);
 
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->qos_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-		list_iterator_destroy(itr);
+		pack32(object->uid, buffer);	
 	}
-	count = NO_VAL;
-	pack32(object->uid, buffer);
 }
 
-extern int unpack_acct_user_rec(void **object, Buf buffer)
+extern int unpack_acct_user_rec(void **object, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
+	char *tmp_info = NULL;
 	acct_user_rec_t *object_ptr = xmalloc(sizeof(acct_user_rec_t));
 	uint32_t count = NO_VAL;
 	acct_coord_rec_t *coord = NULL;
 	acct_association_rec_t *assoc = NULL;
 	int i;
-	char *tmp_info = NULL;
 
 	*object = object_ptr;
-	safe_unpack16((uint16_t *)&object_ptr->admin_level, buffer);
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->assoc_list =
-			list_create(destroy_acct_association_rec);
-		for(i=0; i<count; i++) {
-			if(unpack_acct_association_rec((void *)&assoc, buffer)
-			   == SLURM_ERROR)
-				goto unpack_error;
-			list_append(object_ptr->assoc_list, assoc);
+	
+	if(rpc_version < 3) {
+		safe_unpack16(&object_ptr->admin_level, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->assoc_list =
+				list_create(destroy_acct_association_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_association_rec(
+					   (void *)&assoc, rpc_version, buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->assoc_list, assoc);
+			}
 		}
-	}
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->coord_accts = list_create(destroy_acct_coord_rec);
-		for(i=0; i<count; i++) {
-			if(unpack_acct_coord_rec((void *)&coord, buffer)
-			   == SLURM_ERROR)
-				goto unpack_error;
-			list_append(object_ptr->coord_accts, coord);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->coord_accts =
+				list_create(destroy_acct_coord_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_coord_rec((void *)&coord, 
+							 rpc_version, buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->coord_accts, coord);
+			}
 		}
-	}
-	safe_unpackstr_xmalloc(&object_ptr->default_acct, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->qos_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->qos_list, tmp_info);
+		safe_unpackstr_xmalloc(&object_ptr->default_acct, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
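+			/* the old-format qos_list strings are read here
+			 * and discarded */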
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				xfree(tmp_info);
+			}
+		}
+		safe_unpack32(&object_ptr->uid, buffer);
+	} else if(rpc_version >= 3) {
+		safe_unpack16(&object_ptr->admin_level, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->assoc_list =
+				list_create(destroy_acct_association_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_association_rec(
+					   (void *)&assoc, rpc_version, buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->assoc_list, assoc);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->coord_accts =
+				list_create(destroy_acct_coord_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_coord_rec((void *)&coord, 
+							 rpc_version, buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->coord_accts, coord);
+			}
 		}
+		safe_unpackstr_xmalloc(&object_ptr->default_acct, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
 	}
-	safe_unpack32(&object_ptr->uid, buffer);
 
 	return SLURM_SUCCESS;
 
@@ -701,16 +902,71 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_update_shares_used(void *in, Buf buffer)
+extern void pack_acct_used_limits(void *in, uint16_t rpc_version, Buf buffer)
+{
+	acct_used_limits_t *object = (acct_used_limits_t *)in;
+
+	if(!object) {
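+		/* pack zeroed placeholders so the unpacker always reads
+		 * the same number of fields */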
+		pack64(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		return;
+	}
+	
+	pack64(object->cpu_mins, buffer);
+	pack32(object->cpus, buffer);
+	pack32(object->jobs, buffer);
+	pack32(object->nodes, buffer);
+	pack32(object->submit_jobs, buffer);
+	pack32(object->wall, buffer);
+	pack32(object->uid, buffer);
+}
+
+extern int unpack_acct_used_limits(void **object,
+				   uint16_t rpc_version, Buf buffer)
+{
+	acct_used_limits_t *object_ptr = xmalloc(sizeof(acct_used_limits_t));
+
+	*object = (void *)object_ptr;
+
+	safe_unpack64(&object_ptr->cpu_mins, buffer);
+	safe_unpack32(&object_ptr->cpus, buffer);
+	safe_unpack32(&object_ptr->jobs, buffer);
+	safe_unpack32(&object_ptr->nodes, buffer);
+	safe_unpack32(&object_ptr->submit_jobs, buffer);
+	safe_unpack32(&object_ptr->wall, buffer);
+	safe_unpack32(&object_ptr->uid, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_acct_used_limits(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
+
+extern void pack_update_shares_used(void *in, uint16_t rpc_version, Buf buffer)
 {
 	shares_used_object_t *object = (shares_used_object_t *)in;
+
+	if(!object) {
+		pack32(0, buffer);
+		pack32(0, buffer);
+		return;
+	}
+
 	pack32(object->assoc_id, buffer);
 	pack32(object->shares_used, buffer);
 }
 
-extern int unpack_update_shares_used(void **object, Buf buffer)
+extern int unpack_update_shares_used(void **object, uint16_t rpc_version, Buf buffer)
 {
-	shares_used_object_t *object_ptr = xmalloc(sizeof(shares_used_object_t));
+	shares_used_object_t *object_ptr =
+		xmalloc(sizeof(shares_used_object_t));
 
 	*object = (void *) object_ptr;
 	safe_unpack32(&object_ptr->assoc_id, buffer);
@@ -723,113 +979,183 @@ unpack_error:
 	*object = NULL;
 	return SLURM_ERROR;
 }
-extern void pack_acct_account_rec(void *in, Buf buffer)
+extern void pack_acct_account_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	acct_coord_rec_t *coord = NULL;
 	ListIterator itr = NULL;
 	uint32_t count = NO_VAL;
 	acct_account_rec_t *object = (acct_account_rec_t *)in;
 	acct_association_rec_t *assoc = NULL;
-	char *tmp_info = NULL;
 
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		packnull(buffer);
-		packnull(buffer);
-		packnull(buffer);
-		pack32(NO_VAL, buffer);
-		return;
-	}
+	if(rpc_version < 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			return;
+		}
  
-	if(object->assoc_list)
-		count = list_count(object->assoc_list);
+		if(object->assoc_list)
+			count = list_count(object->assoc_list);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->assoc_list);
-		while((assoc = list_next(itr))) {
-			pack_acct_association_rec(assoc, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->assoc_list);
+			while((assoc = list_next(itr))) {
+				pack_acct_association_rec(assoc, rpc_version,
+							  buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	if(object->coordinators)
-		count = list_count(object->coordinators);
+		if(object->coordinators)
+			count = list_count(object->coordinators);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->coordinators);
-		while((coord = list_next(itr))) {
-			pack_acct_coord_rec(coord, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->coordinators);
+			while((coord = list_next(itr))) {
+				pack_acct_coord_rec(coord, rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
-
-	packstr(object->description, buffer);
-	packstr(object->name, buffer);
-	packstr(object->organization, buffer);
-
-	if(object->qos_list)
-		count = list_count(object->qos_list);
+		count = NO_VAL;
 
-	pack32(count, buffer);
+		packstr(object->description, buffer);
+		packstr(object->name, buffer);
+		packstr(object->organization, buffer);
+
+		pack32(count, buffer);	/* needed for old qos_list */
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			return;
+		}
+ 
+		if(object->assoc_list)
+			count = list_count(object->assoc_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->assoc_list);
+			while((assoc = list_next(itr))) {
+				pack_acct_association_rec(assoc, rpc_version,
+							  buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->qos_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		if(object->coordinators)
+			count = list_count(object->coordinators);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->coordinators);
+			while((coord = list_next(itr))) {
+				pack_acct_coord_rec(coord, rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
+		count = NO_VAL;
+
+		packstr(object->description, buffer);
+		packstr(object->name, buffer);
+		packstr(object->organization, buffer);
 	}
-	count = NO_VAL;
 }
 
-extern int unpack_acct_account_rec(void **object, Buf buffer)
+extern int unpack_acct_account_rec(void **object, uint16_t rpc_version,
+				   Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
 	uint32_t count;
+	char *tmp_info = NULL;
 	acct_coord_rec_t *coord = NULL;
 	acct_association_rec_t *assoc = NULL;
 	acct_account_rec_t *object_ptr = xmalloc(sizeof(acct_account_rec_t));
-	char *tmp_info = NULL;
 
 	*object = object_ptr;
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->assoc_list =
-			list_create(destroy_acct_association_rec);
-		for(i=0; i<count; i++) {
-			if(unpack_acct_association_rec((void *)&assoc, buffer)
-			   == SLURM_ERROR)
-				goto unpack_error;
-			list_append(object_ptr->assoc_list, assoc);
+	if(rpc_version < 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->assoc_list =
+				list_create(destroy_acct_association_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_association_rec((void *)&assoc, 
+							       rpc_version,
+							       buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->assoc_list, assoc);
+			}
 		}
-	}
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->coordinators = list_create(destroy_acct_coord_rec);
-		for(i=0; i<count; i++) {
-			if(unpack_acct_coord_rec((void *)&coord, buffer)
-			   == SLURM_ERROR)
-				goto unpack_error;
-			list_append(object_ptr->coordinators, coord);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->coordinators = 
+				list_create(destroy_acct_coord_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_coord_rec((void *)&coord, 
+							 rpc_version, buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->coordinators, coord);
+			}
 		}
-	}
-	safe_unpackstr_xmalloc(&object_ptr->description, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->organization, &uint32_tmp, buffer);
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->qos_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->qos_list, tmp_info);
+		safe_unpackstr_xmalloc(&object_ptr->description,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->organization,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				xfree(tmp_info);
+			}
+		}
+	} else if(rpc_version >= 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->assoc_list =
+				list_create(destroy_acct_association_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_association_rec((void *)&assoc, 
+							       rpc_version,
+							       buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->assoc_list, assoc);
+			}
 		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->coordinators = 
+				list_create(destroy_acct_coord_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_coord_rec((void *)&coord, 
+							 rpc_version, buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->coordinators, coord);
+			}
+		}
+		safe_unpackstr_xmalloc(&object_ptr->description,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->organization,
+				       &uint32_tmp, buffer);
 	}
 
 	return SLURM_SUCCESS;
@@ -840,7 +1166,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_coord_rec(void *in, Buf buffer)
+extern void pack_acct_coord_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	acct_coord_rec_t *object = (acct_coord_rec_t *)in;
 
@@ -854,7 +1180,8 @@ extern void pack_acct_coord_rec(void *in, Buf buffer)
 	pack16(object->direct, buffer);
 }
 
-extern int unpack_acct_coord_rec(void **object, Buf buffer)
+extern int unpack_acct_coord_rec(void **object, uint16_t rpc_version,
+				 Buf buffer)
 {
 	uint32_t uint32_tmp;
 	acct_coord_rec_t *object_ptr = xmalloc(sizeof(acct_coord_rec_t));
@@ -870,7 +1197,8 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_cluster_accounting_rec(void *in, Buf buffer)
+extern void pack_cluster_accounting_rec(void *in, uint16_t rpc_version,
+					Buf buffer)
 {
 	cluster_accounting_rec_t *object = (cluster_accounting_rec_t *)in;
 	
@@ -894,7 +1222,8 @@ extern void pack_cluster_accounting_rec(void *in, Buf buffer)
 	pack64(object->resv_secs, buffer);
 }
 
-extern int unpack_cluster_accounting_rec(void **object, Buf buffer)
+extern int unpack_cluster_accounting_rec(void **object, uint16_t rpc_version,
+					 Buf buffer)
 {
 	cluster_accounting_rec_t *object_ptr =
 		xmalloc(sizeof(cluster_accounting_rec_t));
@@ -916,80 +1245,205 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_cluster_rec(void *in, Buf buffer)
+extern void pack_acct_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	cluster_accounting_rec_t *acct_info = NULL;
 	ListIterator itr = NULL;
+	char *tmp_info = NULL;
 	uint32_t count = NO_VAL;
 	acct_cluster_rec_t *object = (acct_cluster_rec_t *)in;
 
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		packnull(buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		packnull(buffer);
-		return;
-	}
+	if(rpc_version < 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			pack16(0, buffer);
+			return;
+		}
  
-	if(object->accounting_list)
-		count = list_count(object->accounting_list);
+		if(object->accounting_list)
+			count = list_count(object->accounting_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->accounting_list);
+			while((acct_info = list_next(itr))) {
+				pack_cluster_accounting_rec(
+					acct_info, rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	pack32(count, buffer);
+		packstr(object->control_host, buffer);
+		pack32(object->control_port, buffer);
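+		/* the old wire format sent these limits as per-cluster
+		 * defaults; fill them from the root association */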
+		if(!object->root_assoc) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+		} else {
+			pack32(object->root_assoc->fairshare, buffer);
+			pack32(object->root_assoc->max_cpu_mins_pj, buffer);
+			pack32(object->root_assoc->max_jobs, buffer);
+			pack32(object->root_assoc->max_nodes_pj, buffer);
+			pack32(object->root_assoc->max_wall_pj, buffer);
+		}
 
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->accounting_list);
-		while((acct_info = list_next(itr))) {
-			pack_cluster_accounting_rec(acct_info, buffer);
+		packstr(object->name, buffer);
+
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+			pack_acct_association_rec(NULL, rpc_version, buffer);
+
+			pack16(0, buffer);
+			return;
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+ 
+		if(object->accounting_list)
+			count = list_count(object->accounting_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->accounting_list);
+			while((acct_info = list_next(itr))) {
+				pack_cluster_accounting_rec(
+					acct_info, rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	packstr(object->control_host, buffer);
-	pack32(object->control_port, buffer);
-	pack32(object->default_fairshare, buffer);
-	pack32(object->default_max_cpu_secs_per_job, buffer);
-	pack32(object->default_max_jobs, buffer);
-	pack32(object->default_max_nodes_per_job, buffer);
-	pack32(object->default_max_wall_duration_per_job, buffer);
+		packstr(object->control_host, buffer);
+		pack32(object->control_port, buffer);
 
-	packstr(object->name, buffer);
+		packstr(object->name, buffer);
+
+		if(object->valid_qos_list)
+			count = list_count(object->valid_qos_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->valid_qos_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack_acct_association_rec(object->root_assoc,
+					  rpc_version, buffer);
+
+		pack16(object->rpc_version, buffer);
+	}
 }
 
-extern int unpack_acct_cluster_rec(void **object, Buf buffer)
+extern int unpack_acct_cluster_rec(void **object, uint16_t rpc_version,
+				   Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
+	char *tmp_info = NULL;
 	uint32_t count;
 	acct_cluster_rec_t *object_ptr = xmalloc(sizeof(acct_cluster_rec_t));
 	cluster_accounting_rec_t *acct_info = NULL;
 
 	*object = object_ptr;
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->accounting_list =
-			list_create(destroy_cluster_accounting_rec);
-		for(i=0; i<count; i++) {
-			unpack_cluster_accounting_rec((void *)&acct_info,
-						      buffer);
-			list_append(object_ptr->accounting_list, acct_info);
-		}
-	}
-	safe_unpackstr_xmalloc(&object_ptr->control_host, &uint32_tmp, buffer);
-	safe_unpack32(&object_ptr->control_port, buffer);
-	safe_unpack32(&object_ptr->default_fairshare, buffer);
-	safe_unpack32(&object_ptr->default_max_cpu_secs_per_job, buffer);
-	safe_unpack32(&object_ptr->default_max_jobs, buffer);
-	safe_unpack32(&object_ptr->default_max_nodes_per_job, buffer);
-	safe_unpack32(&object_ptr->default_max_wall_duration_per_job, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+	if(rpc_version < 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->accounting_list =
+				list_create(destroy_cluster_accounting_rec);
+			for(i=0; i<count; i++) {
+				unpack_cluster_accounting_rec(
+					(void *)&acct_info,
+					rpc_version, buffer);
+				list_append(object_ptr->accounting_list,
+					    acct_info);
+			}
+		}
+		safe_unpackstr_xmalloc(&object_ptr->control_host,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->control_port, buffer);
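+		/* rebuild a root association from the old per-cluster
+		 * default limit fields */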
+		object_ptr->root_assoc = 
+			xmalloc(sizeof(acct_association_rec_t));
+		init_acct_association_rec(object_ptr->root_assoc);
+		safe_unpack32(&object_ptr->root_assoc->fairshare, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->root_assoc->max_cpu_mins_pj = uint32_tmp;
+		safe_unpack32(&object_ptr->root_assoc->max_jobs, buffer);
+		safe_unpack32(&object_ptr->root_assoc->max_nodes_pj, buffer);
+		safe_unpack32(&object_ptr->root_assoc->max_wall_pj,
+			      buffer);
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+		/* default to rpc version 2, since that was the version in
+		   use before the rpc version started being packed.
+		*/
+		object_ptr->rpc_version = 2;
+	} else if(rpc_version >= 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->accounting_list =
+				list_create(destroy_cluster_accounting_rec);
+			for(i=0; i<count; i++) {
+				unpack_cluster_accounting_rec(
+					(void *)&acct_info,
+					rpc_version, buffer);
+				list_append(object_ptr->accounting_list,
+					    acct_info);
+			}
+		}
+
+		safe_unpackstr_xmalloc(&object_ptr->control_host,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->control_port, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->valid_qos_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->valid_qos_list,
+					    tmp_info);
+			}
+		}
 
+		if(unpack_acct_association_rec(
+			   (void **)&object_ptr->root_assoc, 
+			   rpc_version, buffer)
+		   == SLURM_ERROR)
+			goto unpack_error;
+
+		safe_unpack16(&object_ptr->rpc_version, buffer);
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -998,7 +1452,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_accounting_rec(void *in, Buf buffer)
+extern void pack_acct_accounting_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	acct_accounting_rec_t *object = (acct_accounting_rec_t *)in;
 	
@@ -1014,7 +1468,8 @@ extern void pack_acct_accounting_rec(void *in, Buf buffer)
 	pack_time(object->period_start, buffer);
 }
 
-extern int unpack_acct_accounting_rec(void **object, Buf buffer)
+extern int unpack_acct_accounting_rec(void **object, uint16_t rpc_version,
+				      Buf buffer)
 {
 	acct_accounting_rec_t *object_ptr =
 		xmalloc(sizeof(acct_accounting_rec_t));
@@ -1032,106 +1487,299 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_association_rec(void *in, Buf buffer)
+extern void pack_acct_association_rec(void *in, uint16_t rpc_version, 
+				      Buf buffer)
 {
 	acct_accounting_rec_t *acct_info = NULL;
 	ListIterator itr = NULL;
 	uint32_t count = NO_VAL;
+	char *tmp_info = NULL;
 	acct_association_rec_t *object = (acct_association_rec_t *)in;	
 	
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		packnull(buffer);
-		packnull(buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		packnull(buffer);
-		pack32(0, buffer);
-		packnull(buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		packnull(buffer);
-		return;
-	}
+	if(rpc_version < 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+
+			pack32(0, buffer);
+			pack32(0, buffer);
+
+			pack32(0, buffer);
+
+			packnull(buffer);
+			return;
+		}
  
-	if(object->accounting_list)
-		count = list_count(object->accounting_list);
+		if(object->accounting_list)
+			count = list_count(object->accounting_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->accounting_list);
+			while((acct_info = list_next(itr))) {
+				pack_acct_accounting_rec(acct_info, 
+							 rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	pack32(count, buffer);
+		packstr(object->acct, buffer);
+		packstr(object->cluster, buffer);
+		pack32(object->fairshare, buffer);
+		pack32(object->id, buffer);
+		pack32(object->lft, buffer);
+		pack32(object->max_cpu_mins_pj, buffer);
+		pack32(object->max_jobs, buffer);
+		pack32(object->max_nodes_pj, buffer);
+		pack32(object->max_wall_pj, buffer);
+		packstr(object->parent_acct, buffer);
+		pack32(object->parent_id, buffer);
+		packstr(object->partition, buffer);
+		pack32(object->rgt, buffer);
+		pack32(object->uid, buffer);
+		pack32(object->used_shares, buffer);
+		packstr(object->user, buffer);	
+	} else if (rpc_version >= 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(0, buffer);
+			pack32(0, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(0, buffer);
+			pack32(0, buffer);
+
+			pack32(0, buffer);
+
+			packnull(buffer);
+			return;
+		}
+ 
+		if(object->accounting_list)
+			count = list_count(object->accounting_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->accounting_list);
+			while((acct_info = list_next(itr))) {
+				pack_acct_accounting_rec(acct_info, 
+							 rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->accounting_list);
-		while((acct_info = list_next(itr))) {
-			pack_acct_accounting_rec(acct_info, buffer);
+		packstr(object->acct, buffer);
+		packstr(object->cluster, buffer);
+
+		pack32(object->fairshare, buffer);
+
+		pack64(object->grp_cpu_mins, buffer);
+		pack32(object->grp_cpus, buffer);
+		pack32(object->grp_jobs, buffer);
+		pack32(object->grp_nodes, buffer);
+		pack32(object->grp_submit_jobs, buffer);
+		pack32(object->grp_wall, buffer);
+
+		pack32(object->id, buffer);
+		pack32(object->lft, buffer);
+
+		pack64(object->max_cpu_mins_pj, buffer);
+		pack32(object->max_cpus_pj, buffer);
+		pack32(object->max_jobs, buffer);
+		pack32(object->max_nodes_pj, buffer);
+		pack32(object->max_submit_jobs, buffer);
+		pack32(object->max_wall_pj, buffer);
+
+		packstr(object->parent_acct, buffer);
+		pack32(object->parent_id, buffer);
+		packstr(object->partition, buffer);
+
+		if(object->qos_list)
+			count = list_count(object->qos_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->qos_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	packstr(object->acct, buffer);
-	packstr(object->cluster, buffer);
-	pack32(object->fairshare, buffer);
-	pack32(object->id, buffer);
-	pack32(object->lft, buffer);
-	pack32(object->max_cpu_secs_per_job, buffer);
-	pack32(object->max_jobs, buffer);
-	pack32(object->max_nodes_per_job, buffer);
-	pack32(object->max_wall_duration_per_job, buffer);
-	packstr(object->parent_acct, buffer);
-	pack32(object->parent_id, buffer);
-	packstr(object->partition, buffer);
-	pack32(object->rgt, buffer);
-	pack32(object->uid, buffer);
-	pack32(object->used_share, buffer);
-	packstr(object->user, buffer);	
+		pack32(object->rgt, buffer);
+		pack32(object->uid, buffer);
+
+		pack32(object->used_shares, buffer);
+
+		packstr(object->user, buffer);	
+	}
 }
 
-extern int unpack_acct_association_rec(void **object, Buf buffer)
+extern int unpack_acct_association_rec(void **object, uint16_t rpc_version,
+				       Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
 	uint32_t count;
+	char *tmp_info = NULL;
 	acct_association_rec_t *object_ptr = 
 		xmalloc(sizeof(acct_association_rec_t));
 	acct_accounting_rec_t *acct_info = NULL;
 
 	*object = object_ptr;
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->accounting_list =
-			list_create(destroy_acct_accounting_rec);
-		for(i=0; i<count; i++) {
-			if(unpack_acct_accounting_rec((void **)&acct_info,
-						      buffer) == SLURM_ERROR)
-				goto unpack_error;
-			list_append(object_ptr->accounting_list, acct_info);
-		}
-	}
-	safe_unpackstr_xmalloc(&object_ptr->acct, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp, buffer);
-	safe_unpack32(&object_ptr->fairshare, buffer);
-	safe_unpack32(&object_ptr->id, buffer);
-	safe_unpack32(&object_ptr->lft, buffer);
-	safe_unpack32(&object_ptr->max_cpu_secs_per_job, buffer);
-	safe_unpack32(&object_ptr->max_jobs, buffer);
-	safe_unpack32(&object_ptr->max_nodes_per_job, buffer);
-	safe_unpack32(&object_ptr->max_wall_duration_per_job, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp, buffer);
-	safe_unpack32(&object_ptr->parent_id, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->partition, &uint32_tmp, buffer);
-	safe_unpack32(&object_ptr->rgt, buffer);
-	safe_unpack32(&object_ptr->uid, buffer);
-	safe_unpack32(&object_ptr->used_share, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
+	if(rpc_version < 3) {
+		init_acct_association_rec(object_ptr);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->accounting_list =
+				list_create(destroy_acct_accounting_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_accounting_rec(
+					   (void **)&acct_info,
+					   rpc_version, 
+					   buffer) == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->accounting_list, 
+					    acct_info);
+			}
+		}
+		safe_unpackstr_xmalloc(&object_ptr->acct, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&object_ptr->fairshare, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+		safe_unpack32(&object_ptr->lft, buffer);
+
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->max_cpu_mins_pj = uint32_tmp;
+		safe_unpack32(&object_ptr->max_jobs, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&object_ptr->parent_id, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->partition, &uint32_tmp,
+				       buffer);
+		
+		safe_unpack32(&object_ptr->rgt, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
+
+		safe_unpack32(&object_ptr->used_shares, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
+	} else if (rpc_version >= 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->accounting_list =
+				list_create(destroy_acct_accounting_rec);
+			for(i=0; i<count; i++) {
+				if(unpack_acct_accounting_rec(
+					   (void **)&acct_info,
+					   rpc_version, 
+					   buffer) == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->accounting_list, 
+					    acct_info);
+			}
+		}
+
+		safe_unpackstr_xmalloc(&object_ptr->acct, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&object_ptr->fairshare, buffer);
+
+		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
+		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		safe_unpack32(&object_ptr->grp_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_nodes, buffer);
+		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_wall, buffer);
+
+		safe_unpack32(&object_ptr->id, buffer);
+		safe_unpack32(&object_ptr->lft, buffer);
+
+		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
+		safe_unpack32(&object_ptr->max_jobs, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
+		safe_unpack32(&object_ptr->max_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&object_ptr->parent_id, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->partition, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&count, buffer);
+		/* A count of zero (unlike NO_VAL) is meaningful here:
+		   it tells the caller the qos_list has changed */
+		if(count != NO_VAL) {
+			object_ptr->qos_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->qos_list, tmp_info);
+			}
+		}
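+		/* Note (illustrative, matching the packer above): the packer
+		 * sends list_count(qos_list) when the list exists and NO_VAL
+		 * when it is NULL, so a count of 0 here means the list was
+		 * explicitly emptied, not left unset. */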
+
+		safe_unpack32(&object_ptr->rgt, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
+
+		safe_unpack32(&object_ptr->used_shares, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
+	}
 
-	//log_assoc_rec(object_ptr);
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1140,30 +1788,197 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_qos_rec(void *in, Buf buffer)
+extern void pack_acct_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
+	ListIterator itr = NULL;
 	acct_qos_rec_t *object = (acct_qos_rec_t *)in;	
-	if(!object) {
-		packnull(buffer);
-		pack32(0, buffer);
-		packnull(buffer);
-		return;
+	uint32_t count = NO_VAL;
+	char *tmp_info = NULL;
+
+	if(rpc_version < 3) {
+		if(!object) {
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+			return;
+		}
+		packstr(object->description, buffer);	
+		pack32(object->id, buffer);
+		packstr(object->name, buffer);	
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(0, buffer);
+
+			pack32(NO_VAL, buffer);
+			return;
+		}
+		packstr(object->description, buffer);	
+		pack32(object->id, buffer);
+
+		pack64(object->grp_cpu_mins, buffer);
+		pack32(object->grp_cpus, buffer);
+		pack32(object->grp_jobs, buffer);
+		pack32(object->grp_nodes, buffer);
+		pack32(object->grp_submit_jobs, buffer);
+		pack32(object->grp_wall, buffer);
+
+		pack64(object->max_cpu_mins_pu, buffer);
+		pack32(object->max_cpus_pu, buffer);
+		pack32(object->max_jobs_pu, buffer);
+		pack32(object->max_nodes_pu, buffer);
+		pack32(object->max_submit_jobs_pu, buffer);
+		pack32(object->max_wall_pu, buffer);
+
+		packstr(object->name, buffer);	
+
+		if(object->preemptee_list)
+			count = list_count(object->preemptee_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->preemptee_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		if(object->preemptor_list)
+			count = list_count(object->preemptor_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->preemptor_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		pack32(object->priority, buffer);
+		
+		if(object->user_limit_list)
+			count = list_count(object->user_limit_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			acct_used_limits_t *used_limits = NULL;
+			itr = list_iterator_create(object->user_limit_list);
+			while((used_limits = list_next(itr))) {
+				pack_acct_used_limits(used_limits,
+						      rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 	}
-	packstr(object->description, buffer);	
-	pack32(object->id, buffer);
-	packstr(object->name, buffer);	
 }
 
-extern int unpack_acct_qos_rec(void **object, Buf buffer)
+extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
+	int i;
 	acct_qos_rec_t *object_ptr = xmalloc(sizeof(acct_qos_rec_t));
+	uint32_t count = NO_VAL;
+	char *tmp_info = NULL;
 
 	*object = object_ptr;
-	safe_unpackstr_xmalloc(&object_ptr->description, &uint32_tmp, buffer);
-	safe_unpack32(&object_ptr->id, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+	
+	if(rpc_version < 3) {
+		safe_unpackstr_xmalloc(&object_ptr->description,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+	} else if(rpc_version >= 3) {
+		safe_unpackstr_xmalloc(&object_ptr->description,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+
+		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
+		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		safe_unpack32(&object_ptr->grp_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_nodes, buffer);
+		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_wall, buffer);
+
+		safe_unpack64(&object_ptr->max_cpu_mins_pu, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pu, buffer);
+		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pu, buffer);
+		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_wall_pu, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->preemptee_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->preemptee_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->preemptor_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->preemptor_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&object_ptr->priority, buffer);
 
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			void *used_limits = NULL;
+
+			object_ptr->user_limit_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				unpack_acct_used_limits(&used_limits,
+							rpc_version, buffer);
+				list_append(object_ptr->user_limit_list,
+					    used_limits);
+			}
+		}
+
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1172,40 +1987,85 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_txn_rec(void *in, Buf buffer)
+extern void pack_acct_txn_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	acct_txn_rec_t *object = (acct_txn_rec_t *)in;	
-	if(!object) {
-		pack16(0, buffer);
-		packnull(buffer);
-		pack32(0, buffer);
-		packnull(buffer);
-		pack_time(0, buffer);
-		packnull(buffer);
-		return;
+	if(rpc_version < 3) {
+		if(!object) {
+			pack16(0, buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+			pack_time(0, buffer);
+			packnull(buffer);
+			return;
+		}
+	
+		pack16(object->action, buffer);
+		packstr(object->actor_name, buffer);
+		pack32(object->id, buffer);
+		packstr(object->set_info, buffer);
+		pack_time(object->timestamp, buffer);
+		packstr(object->where_query, buffer);
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			packnull(buffer);
+			pack16(0, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+			pack_time(0, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			return;
+		}
+	
+		packstr(object->accts, buffer);
+		pack16(object->action, buffer);
+		packstr(object->actor_name, buffer);
+		packstr(object->clusters, buffer);
+		pack32(object->id, buffer);
+		packstr(object->set_info, buffer);
+		pack_time(object->timestamp, buffer);
+		packstr(object->users, buffer);
+		packstr(object->where_query, buffer);
 	}
-	pack16(object->action, buffer);
-	packstr(object->actor_name, buffer);
-	pack32(object->id, buffer);
-	packstr(object->set_info, buffer);
-	pack_time(object->timestamp, buffer);
-	packstr(object->where_query, buffer);
 }
 
-extern int unpack_acct_txn_rec(void **object, Buf buffer)
+extern int unpack_acct_txn_rec(void **object, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	acct_txn_rec_t *object_ptr = xmalloc(sizeof(acct_txn_rec_t));
 
 	*object = object_ptr;
-
-	safe_unpack16(&object_ptr->action, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->actor_name, &uint32_tmp, buffer);
-	safe_unpack32(&object_ptr->id, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->set_info, &uint32_tmp, buffer);
-	safe_unpack_time(&object_ptr->timestamp, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->where_query, &uint32_tmp, buffer);
-
+	if(rpc_version < 3) {
+		safe_unpack16(&object_ptr->action, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->actor_name, 
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->set_info,
+				       &uint32_tmp, buffer);
+		safe_unpack_time(&object_ptr->timestamp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->where_query,
+				       &uint32_tmp, buffer);
+	} else if (rpc_version >= 3) {
+		safe_unpackstr_xmalloc(&object_ptr->accts, 
+				       &uint32_tmp, buffer);
+		safe_unpack16(&object_ptr->action, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->actor_name, 
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->clusters, 
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->set_info,
+				       &uint32_tmp, buffer);
+		safe_unpack_time(&object_ptr->timestamp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->users, 
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->where_query,
+				       &uint32_tmp, buffer);		
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1215,63 +2075,86 @@ unpack_error:
 
 }
 
-extern void pack_acct_user_cond(void *in, Buf buffer)
+extern void pack_acct_user_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
 	char *tmp_info = NULL;
 	ListIterator itr = NULL;
 	acct_user_cond_t *object = (acct_user_cond_t *)in;
 	uint32_t count = NO_VAL;
 
-	if(!object) {
-		pack16(0, buffer);
-		pack_acct_association_cond(NULL, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack16(0, buffer);
-		pack16(0, buffer);
-		pack16(0, buffer);
-		return;
-	}
+	if(rpc_version < 3) {
+		if(!object) {
+			pack16(0, buffer);
+			pack_acct_association_cond(NULL, rpc_version, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
  
-	pack16((uint16_t)object->admin_level, buffer);
+		pack16(object->admin_level, buffer);
 
-	pack_acct_association_cond(object->assoc_cond, buffer);
+		pack_acct_association_cond(object->assoc_cond, 
+					   rpc_version, buffer);
 	
-	if(object->def_acct_list)
-		count = list_count(object->def_acct_list);
+		if(object->def_acct_list)
+			count = list_count(object->def_acct_list);
 
-	pack32(count, buffer);
+		pack32(count, buffer);
 
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->def_acct_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->def_acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	if(object->qos_list)
-		count = list_count(object->qos_list);
+		pack32(count, buffer); // NEEDED FOR OLD qos_list
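+		/* Note: rpc_version < 3 peers still expect a qos_list count
+		 * at this position, so NO_VAL is packed as a placeholder
+		 * (the matching unpack reads and discards it). */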
+
+		pack16(object->with_assocs, buffer);
+		pack16(object->with_coords, buffer);
+		pack16(object->with_deleted, buffer);
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			pack16(0, buffer);
+			pack_acct_association_cond(NULL, rpc_version, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
+ 
+		pack16(object->admin_level, buffer);
 
-	pack32(count, buffer);
+		pack_acct_association_cond(object->assoc_cond, 
+					   rpc_version, buffer);
+	
+		if(object->def_acct_list)
+			count = list_count(object->def_acct_list);
 
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->qos_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		pack32(count, buffer);
 
-	pack16((uint16_t)object->with_assocs, buffer);
-	pack16((uint16_t)object->with_coords, buffer);
-	pack16((uint16_t)object->with_deleted, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->def_acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
+		pack16(object->with_assocs, buffer);
+		pack16(object->with_coords, buffer);
+		pack16(object->with_deleted, buffer);
+	}
 }
 
-extern int unpack_acct_user_cond(void **object, Buf buffer)
+extern int unpack_acct_user_cond(void **object, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
@@ -1281,108 +2164,166 @@ extern int unpack_acct_user_cond(void **object, Buf buffer)
 
 	*object = object_ptr;
 
-	safe_unpack16((uint16_t *)&object_ptr->admin_level, buffer);
-
-	if(unpack_acct_association_cond((void **)&object_ptr->assoc_cond,
-					buffer) == SLURM_ERROR)
-		goto unpack_error;
-	
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->def_acct_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->def_acct_list, tmp_info);
+	if(rpc_version < 3) {
+		safe_unpack16(&object_ptr->admin_level, buffer);
+		
+		if(unpack_acct_association_cond(
+			   (void **)&object_ptr->assoc_cond,
+			   rpc_version, buffer) == SLURM_ERROR)
+			goto unpack_error;
+		
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->def_acct_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->def_acct_list,
+					    tmp_info);
+			}
 		}
-	}
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->qos_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->qos_list, tmp_info);
+		safe_unpack32(&count, buffer);
+
+		safe_unpack16(&object_ptr->with_assocs, buffer);
+		safe_unpack16(&object_ptr->with_coords, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+	} else if(rpc_version >= 3) {
+		safe_unpack16(&object_ptr->admin_level, buffer);
+		
+		if(unpack_acct_association_cond(
+			   (void **)&object_ptr->assoc_cond,
+			   rpc_version, buffer) == SLURM_ERROR)
+			goto unpack_error;
+		
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->def_acct_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->def_acct_list,
+					    tmp_info);
+			}
 		}
+		safe_unpack16(&object_ptr->with_assocs, buffer);
+		safe_unpack16(&object_ptr->with_coords, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
 	}
-	safe_unpack16((uint16_t *)&object_ptr->with_assocs, buffer);
-	safe_unpack16((uint16_t *)&object_ptr->with_coords, buffer);
-	safe_unpack16((uint16_t *)&object_ptr->with_deleted, buffer);
-
 	return SLURM_SUCCESS;
-
+		
 unpack_error:
 	destroy_acct_user_cond(object_ptr);
 	*object = NULL;
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_account_cond(void *in, Buf buffer)
+extern void pack_acct_account_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
 	char *tmp_info = NULL;
 	ListIterator itr = NULL;
 	acct_account_cond_t *object = (acct_account_cond_t *)in;
 	uint32_t count = NO_VAL;
 
-	if(!object) {
-		pack_acct_association_cond(NULL, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack16(0, buffer);
-		pack16(0, buffer);
-		pack16(0, buffer);
-		return;
-	}
-	pack_acct_association_cond(object->assoc_cond, buffer);
-	
-	count = NO_VAL;
-	if(object->description_list)
-		count = list_count(object->description_list);
-
-	pack32(count, buffer);
-
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->description_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+	if(rpc_version < 3) {
+		if(!object) {
+			pack_acct_association_cond(NULL, rpc_version, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
-
-	if(object->organization_list)
-		count = list_count(object->organization_list);
-
-	pack32(count, buffer);
-
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->organization_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack_acct_association_cond(object->assoc_cond,
+					   rpc_version, buffer);
+		
+		count = NO_VAL;
+		if(object->description_list)
+			count = list_count(object->description_list);
+		
+		pack32(count, buffer);
+		
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->description_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
-
-	if(object->qos_list)
-		count = list_count(object->qos_list);
-
-	pack32(count, buffer);
-
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->qos_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		count = NO_VAL;
+		
+		if(object->organization_list)
+			count = list_count(object->organization_list);
+		
+		pack32(count, buffer);
+		
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->organization_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		pack32(count, buffer);
+		
+		pack16(object->with_assocs, buffer);
+		pack16(object->with_coords, buffer);
+		pack16(object->with_deleted, buffer);
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			pack_acct_association_cond(NULL, rpc_version, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
+		pack_acct_association_cond(object->assoc_cond,
+					   rpc_version, buffer);
+		
+		count = NO_VAL;
+		if(object->description_list)
+			count = list_count(object->description_list);
+		
+		pack32(count, buffer);
+		
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->description_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
 		count = NO_VAL;
+		
+		if(object->organization_list)
+			count = list_count(object->organization_list);
+		
+		pack32(count, buffer);
+		
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->organization_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		pack16(object->with_assocs, buffer);
+		pack16(object->with_coords, buffer);
+		pack16(object->with_deleted, buffer);		
 	}
-
-	pack16((uint16_t)object->with_assocs, buffer);
-	pack16((uint16_t)object->with_coords, buffer);
-	pack16((uint16_t)object->with_deleted, buffer);
 }
 
-extern int unpack_acct_account_cond(void **object, Buf buffer)
+extern int unpack_acct_account_cond(void **object, uint16_t rpc_version,
+				    Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
@@ -1391,38 +2332,72 @@ extern int unpack_acct_account_cond(void **object, Buf buffer)
 	char *tmp_info = NULL;
 
 	*object = object_ptr;
-	if(unpack_acct_association_cond((void **)&object_ptr->assoc_cond,
-					buffer) == SLURM_ERROR)
-		goto unpack_error;
-	
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->description_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->description_list, tmp_info);
+
+	if(rpc_version < 3) {
+		if(unpack_acct_association_cond(
+			   (void **)&object_ptr->assoc_cond,
+			   rpc_version, buffer) == SLURM_ERROR)
+			goto unpack_error;
+		
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->description_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->description_list,
+					    tmp_info);
+			}
 		}
-	}
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->organization_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->organization_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->organization_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->organization_list,
+					    tmp_info);
+			}
 		}
-	}
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->qos_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->qos_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		safe_unpack16(&object_ptr->with_assocs, buffer);
+		safe_unpack16(&object_ptr->with_coords, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+	} else if (rpc_version >= 3) {
+		if(unpack_acct_association_cond(
+			   (void **)&object_ptr->assoc_cond,
+			   rpc_version, buffer) == SLURM_ERROR)
+			goto unpack_error;
+		
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->description_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->description_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->organization_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->organization_list,
+					    tmp_info);
+			}
 		}
-	}
-	safe_unpack16((uint16_t *)&object_ptr->with_assocs, buffer);
-	safe_unpack16((uint16_t *)&object_ptr->with_coords, buffer);
-	safe_unpack16((uint16_t *)&object_ptr->with_deleted, buffer);
 
+		safe_unpack16(&object_ptr->with_assocs, buffer);
+		safe_unpack16(&object_ptr->with_coords, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1431,7 +2406,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_cluster_cond(void *in, Buf buffer)
+extern void pack_acct_cluster_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
 	char *tmp_info = NULL;
 	ListIterator itr = NULL;
@@ -1459,15 +2434,17 @@ extern void pack_acct_cluster_cond(void *in, Buf buffer)
 		}
 		list_iterator_destroy(itr);
 	}
+	count = NO_VAL;
 
 	pack32(object->usage_end, buffer);
 	pack32(object->usage_start, buffer);
 
-	pack16((uint16_t)object->with_usage, buffer);
-	pack16((uint16_t)object->with_deleted, buffer);
+	pack16(object->with_usage, buffer);
+	pack16(object->with_deleted, buffer);
 }
 
-extern int unpack_acct_cluster_cond(void **object, Buf buffer)
+extern int unpack_acct_cluster_cond(void **object, uint16_t rpc_version, 
+				    Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
@@ -1477,7 +2454,7 @@ extern int unpack_acct_cluster_cond(void **object, Buf buffer)
 
 	*object = object_ptr;
 	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
+	if(count && count != NO_VAL) {
 		object_ptr->cluster_list = list_create(slurm_destroy_char);
 		for(i=0; i<count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
@@ -1487,8 +2464,8 @@ extern int unpack_acct_cluster_cond(void **object, Buf buffer)
 	safe_unpack32(&object_ptr->usage_end, buffer);
 	safe_unpack32(&object_ptr->usage_start, buffer);
 
-	safe_unpack16((uint16_t *)&object_ptr->with_usage, buffer);
-	safe_unpack16((uint16_t *)&object_ptr->with_deleted, buffer);
+	safe_unpack16(&object_ptr->with_usage, buffer);
+	safe_unpack16(&object_ptr->with_deleted, buffer);
 
 	return SLURM_SUCCESS;
 
@@ -1498,7 +2475,8 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_association_cond(void *in, Buf buffer)
+extern void pack_acct_association_cond(void *in, uint16_t rpc_version, 
+				       Buf buffer)
 {
 	char *tmp_info = NULL;
 	uint32_t count = NO_VAL;
@@ -1506,110 +2484,454 @@ extern void pack_acct_association_cond(void *in, Buf buffer)
 	ListIterator itr = NULL;
 	acct_association_cond_t *object = (acct_association_cond_t *)in;
 
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(0, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(NO_VAL, buffer);
-		packnull(buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack32(NO_VAL, buffer);
-		pack16(0, buffer);
-		pack16(0, buffer);
-		pack16(0, buffer);
-		pack16(0, buffer);
-		return;
-	}
+	if(rpc_version < 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
 
-	if(object->acct_list)
-		count = list_count(object->acct_list);
+		if(object->acct_list)
+			count = list_count(object->acct_list);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->acct_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	if(object->cluster_list)
-		count = list_count(object->cluster_list);
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->cluster_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	pack32(object->fairshare, buffer);
+		if(object->fairshare_list 
+		   && list_count(object->fairshare_list)) 
+			pack32(atoi(list_peek(object->fairshare_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
 	
-	if(object->id_list)
-		count = list_count(object->id_list);
+		if(object->id_list)
+			count = list_count(object->id_list);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->id_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
 		}
-	}
-	count = NO_VAL;
+		count = NO_VAL;
+		
+		if(object->max_cpu_mins_pj_list
+		   && list_count(object->max_cpu_mins_pj_list)) 
+			pack32(atoi(list_peek(object->max_cpu_mins_pj_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+		
+		if(object->max_jobs_list && list_count(object->max_jobs_list)) 
+			pack32(atoi(list_peek(object->max_jobs_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+
+		if(object->max_nodes_pj_list
+		   && list_count(object->max_nodes_pj_list)) 
+			pack32(atoi(list_peek(object->max_nodes_pj_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+
+		if(object->max_wall_pj_list 
+		   && list_count(object->max_wall_pj_list)) 
+			pack32(atoi(list_peek(object->max_wall_pj_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+
+		if(object->partition_list)
+			count = list_count(object->partition_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->partition_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	pack32(object->max_cpu_secs_per_job, buffer);
-	pack32(object->max_jobs, buffer);
-	pack32(object->max_nodes_per_job, buffer);
-	pack32(object->max_wall_duration_per_job, buffer);
+		if(object->parent_acct_list 
+		   && list_count(object->parent_acct_list)) 
+			packstr(list_peek(object->parent_acct_list), 
+			       buffer);
+		else 
+			packnull(buffer);
 
-	if(object->partition_list)
-		count = list_count(object->partition_list);
+		pack32(object->usage_end, buffer);
+		pack32(object->usage_start, buffer);
+
+		if(object->user_list)
+			count = list_count(object->user_list);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->partition_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->user_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+		pack16(object->without_parent_info, buffer);
+		pack16(object->without_parent_limits, buffer);
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			
+			pack32(NO_VAL, buffer);
+
+			pack32(0, buffer);
+			pack32(0, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
+
+		if(object->acct_list)
+			count = list_count(object->acct_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->fairshare_list)
+			count = list_count(object->fairshare_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->fairshare_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_cpu_mins_list)
+			count = list_count(object->grp_cpu_mins_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_cpu_mins_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_cpus_list)
+			count = list_count(object->grp_cpus_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_cpus_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_jobs_list)
+			count = list_count(object->grp_jobs_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_nodes_list)
+			count = list_count(object->grp_nodes_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_nodes_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_submit_jobs_list)
+			count = list_count(object->grp_submit_jobs_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->grp_submit_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->grp_wall_list)
+			count = list_count(object->grp_wall_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_wall_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->id_list)
+			count = list_count(object->id_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->max_cpu_mins_pj_list)
+			count = list_count(object->max_cpu_mins_pj_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->max_cpu_mins_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_cpus_pj_list)
+			count = list_count(object->max_cpus_pj_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_cpus_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_jobs_list)
+			count = list_count(object->max_jobs_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_nodes_pj_list)
+			count = list_count(object->max_nodes_pj_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_nodes_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_submit_jobs_list)
+			count = list_count(object->max_submit_jobs_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->max_submit_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_wall_pj_list)
+			count = list_count(object->max_wall_pj_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_wall_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+	
+		if(object->partition_list)
+			count = list_count(object->partition_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->partition_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->parent_acct_list)
+			count = list_count(object->parent_acct_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->parent_acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	packstr(object->parent_acct, buffer);
+		if(object->qos_list)
+			count = list_count(object->qos_list);
 
-	pack32(object->usage_end, buffer);
-	pack32(object->usage_start, buffer);
+		pack32(count, buffer);
 
-	if(object->user_list)
-		count = list_count(object->user_list);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->qos_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack32(object->usage_end, buffer);
+		pack32(object->usage_start, buffer);
+
+		if(object->user_list)
+			count = list_count(object->user_list);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->user_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->user_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	pack16((uint16_t)object->with_usage, buffer);
-	pack16((uint16_t)object->with_deleted, buffer);
-	pack16((uint16_t)object->without_parent_info, buffer);
-	pack16((uint16_t)object->without_parent_limits, buffer);
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+		pack16(object->with_raw_qos, buffer);
+		pack16(object->with_sub_accts, buffer);
+		pack16(object->without_parent_info, buffer);
+		pack16(object->without_parent_limits, buffer);
+	}
 }
 
-extern int unpack_acct_association_cond(void **object, Buf buffer)
+extern int unpack_acct_association_cond(void **object, 
+					uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
@@ -1617,68 +2939,356 @@ extern int unpack_acct_association_cond(void **object, Buf buffer)
 	acct_association_cond_t *object_ptr =
 		xmalloc(sizeof(acct_association_cond_t));
 	char *tmp_info = NULL;
-
 	*object = object_ptr;
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->acct_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->acct_list, tmp_info);
+
+	if(rpc_version < 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->acct_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->acct_list, tmp_info);
+			}
 		}
-	}
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->cluster_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->cluster_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->cluster_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list,
+					    tmp_info);
+			}
+		}
+		/* We have to check for 0 here because of a bug in
+		   version 2 that sent 0's when it should have sent
+		   NO_VAL
+		*/
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->fairshare_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->fairshare_list,
+				    xstrdup_printf("%u", count));
 		}
-	}
 
-	safe_unpack32(&object_ptr->fairshare, buffer);
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
+		}
+	
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->max_cpu_mins_pj_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->max_cpu_mins_pj_list,
+				    xstrdup_printf("%u", count));
+		}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->id_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->id_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->max_jobs_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->max_jobs_list,
+				    xstrdup_printf("%u", count));
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->max_nodes_pj_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->max_nodes_pj_list,
+				    xstrdup_printf("%u", count));
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->max_wall_pj_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->max_wall_pj_list,
+				    xstrdup_printf("%u", count));
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->partition_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->partition_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+		if(tmp_info) {
+			object_ptr->parent_acct_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->parent_acct_list, tmp_info);
+		}
+
+		safe_unpack32(&object_ptr->usage_end, buffer);
+		safe_unpack32(&object_ptr->usage_start, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->user_list, tmp_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+		safe_unpack16(&object_ptr->without_parent_info, buffer);
+		safe_unpack16(&object_ptr->without_parent_limits, buffer);
+	} else if(rpc_version >= 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->acct_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->acct_list, tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->cluster_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->fairshare_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->fairshare_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpu_mins_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpu_mins_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpus_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpus_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_nodes_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_nodes_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_submit_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_submit_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_wall_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_wall_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
 		}
-	}
 	
-	safe_unpack32(&object_ptr->max_cpu_secs_per_job, buffer);
-	safe_unpack32(&object_ptr->max_jobs, buffer);
-	safe_unpack32(&object_ptr->max_nodes_per_job, buffer);
-	safe_unpack32(&object_ptr->max_wall_duration_per_job, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpu_mins_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpu_mins_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpus_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpus_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_jobs_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_nodes_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_nodes_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_submit_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_submit_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_wall_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_wall_pj_list,
+					    tmp_info);
+			}
+		}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->partition_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->partition_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->partition_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->partition_list,
+					    tmp_info);
+			}
 		}
-	}
 
-	safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->parent_acct_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->parent_acct_list,
+					    tmp_info);
+			}
+		}
 
-	safe_unpack32(&object_ptr->usage_end, buffer);
-	safe_unpack32(&object_ptr->usage_start, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->qos_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->qos_list, tmp_info);
+			}
+		}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->user_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->user_list, tmp_info);
+		safe_unpack32(&object_ptr->usage_end, buffer);
+		safe_unpack32(&object_ptr->usage_start, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->user_list, tmp_info);
+			}
 		}
+
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+		safe_unpack16(&object_ptr->with_raw_qos, buffer);
+		safe_unpack16(&object_ptr->with_sub_accts, buffer);
+		safe_unpack16(&object_ptr->without_parent_info, buffer);
+		safe_unpack16(&object_ptr->without_parent_limits, buffer);
 	}
 
-	safe_unpack16(&object_ptr->with_usage, buffer);
-	safe_unpack16(&object_ptr->with_deleted, buffer);
-	safe_unpack16(&object_ptr->without_parent_info, buffer);
-	safe_unpack16(&object_ptr->without_parent_limits, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1687,7 +3297,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_job_cond(void *in, Buf buffer)
+extern void pack_acct_job_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
 	char *tmp_info = NULL;
 	jobacct_selected_step_t *job = NULL;
@@ -1784,7 +3394,7 @@ extern void pack_acct_job_cond(void *in, Buf buffer)
 	if(count && count != NO_VAL) {
 		itr = list_iterator_create(object->step_list);
 		while((job = list_next(itr))) {
-			pack_jobacct_selected_step(job, buffer);
+			pack_jobacct_selected_step(job, rpc_version, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
@@ -1822,7 +3432,7 @@ extern void pack_acct_job_cond(void *in, Buf buffer)
 	pack16(object->without_steps, buffer);
 }
 
-extern int unpack_acct_job_cond(void **object, Buf buffer)
+extern int unpack_acct_job_cond(void **object, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
@@ -1885,7 +3495,7 @@ extern int unpack_acct_job_cond(void **object, Buf buffer)
 		object_ptr->step_list =
 			list_create(destroy_jobacct_selected_step);
 		for(i=0; i<count; i++) {
-			unpack_jobacct_selected_step(&job, buffer);
+			unpack_jobacct_selected_step(&job, rpc_version, buffer);
 			list_append(object_ptr->step_list, job);
 		}
 	}
@@ -1921,7 +3531,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_qos_cond(void *in, Buf buffer)
+extern void pack_acct_qos_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t count = NO_VAL;
 	char *tmp_info = NULL;
@@ -1978,7 +3588,7 @@ extern void pack_acct_qos_cond(void *in, Buf buffer)
 	pack16(object->with_deleted, buffer);
 }
 
-extern int unpack_acct_qos_cond(void **object, Buf buffer)
+extern int unpack_acct_qos_cond(void **object, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
@@ -2024,66 +3634,188 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_txn_cond(void *in, Buf buffer)
+extern void pack_acct_txn_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t count = NO_VAL;
 	char *tmp_info = NULL;
 	ListIterator itr = NULL;
 	acct_txn_cond_t *object = (acct_txn_cond_t *)in;
 
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		return;
-	}
-	if(object->action_list)
-		count = list_count(object->action_list);
+	if(rpc_version < 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			return;
+		}
+		if(object->action_list)
+			count = list_count(object->action_list);
 	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->action_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->action_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	if(object->actor_list) 
-		count = list_count(object->actor_list);
+		if(object->actor_list) 
+			count = list_count(object->actor_list);
 
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->actor_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->actor_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr); 
 		}
-		list_iterator_destroy(itr); 
-	}
-	count = NO_VAL;
+		count = NO_VAL;
 
-	if(object->id_list)
-		count = list_count(object->id_list);
+		if(object->id_list)
+			count = list_count(object->id_list);
 	 
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->id_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		} 
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack32(object->time_end, buffer);
+		pack32(object->time_start, buffer);
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			return;
+		}
+		if(object->acct_list)
+			count = list_count(object->acct_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->action_list)
+			count = list_count(object->action_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->action_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->actor_list) 
+			count = list_count(object->actor_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->actor_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr); 
+		}
+		count = NO_VAL;
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+	 
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->id_list)
+			count = list_count(object->id_list);
+	 
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->info_list)
+			count = list_count(object->info_list);
+	 
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->info_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-	pack32(object->time_end, buffer);
-	pack32(object->time_start, buffer);
+		if(object->name_list)
+			count = list_count(object->name_list);
+	 
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->name_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
+		pack32(object->time_end, buffer);
+		pack32(object->time_start, buffer);
+		if(object->user_list)
+			count = list_count(object->user_list);
+	 
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->user_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		pack16(object->with_assoc_info, buffer);
+	}
 }
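/*
 * Illustrative sketch (not part of this patch): the rpc_version branching
 * used by pack_acct_txn_cond() above.  Fields introduced with protocol
 * version 3 are only written when the peer speaks version >= 3, and the
 * NULL-object path must emit exactly as many placeholder fields as the
 * non-NULL path so the matching unpack routine stays in sync.  The struct
 * and function names below are hypothetical.
 */
typedef struct {
	uint32_t old_field;	/* present in every protocol version */
	uint16_t new_flag;	/* added with version 3 */
} example_cond_t;

static void _pack_example_cond(example_cond_t *object, uint16_t rpc_version,
			       Buf buffer)
{
	if (rpc_version < 3) {
		if (!object) {
			pack32(0, buffer);
			return;
		}
		pack32(object->old_field, buffer);
	} else {
		if (!object) {
			pack32(0, buffer);
			pack16(0, buffer);
			return;
		}
		pack32(object->old_field, buffer);
		pack16(object->new_flag, buffer);
	}
}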
 
-extern int unpack_acct_txn_cond(void **object, Buf buffer)
+extern int unpack_acct_txn_cond(void **object, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
@@ -2092,36 +3824,135 @@ extern int unpack_acct_txn_cond(void **object, Buf buffer)
 	char *tmp_info = NULL;
 
 	*object = object_ptr;
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->action_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->action_list, tmp_info);
+	if(rpc_version < 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->action_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->action_list, tmp_info);
+			}
 		}
-	}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->actor_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->actor_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->actor_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->actor_list, tmp_info);
+			}
 		}
-	}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->id_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->id_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&object_ptr->time_end, buffer);
+		safe_unpack32(&object_ptr->time_start, buffer);
+	} else if (rpc_version >=3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->acct_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->acct_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->action_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->action_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->actor_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->actor_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->cluster_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->cluster_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->info_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->info_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->name_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->name_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&object_ptr->time_end, buffer);
+		safe_unpack32(&object_ptr->time_start, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->user_list, tmp_info);
+			}
 		}
+
+		safe_unpack16(&object_ptr->with_assoc_info, buffer);
 	}
 
-	safe_unpack32(&object_ptr->time_end, buffer);
-	safe_unpack32(&object_ptr->time_start, buffer);
-	
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -2130,12 +3961,13 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer)
+extern void pack_acct_update_object(acct_update_object_t *object,
+				    uint16_t rpc_version, Buf buffer)
 {
 	uint32_t count = NO_VAL;
 	ListIterator itr = NULL;
 	void *acct_object = NULL;
-	void (*my_function) (void *object, Buf buffer);
+	void (*my_function) (void *object, uint16_t rpc_version, Buf buffer);
 
 	pack16(object->type, buffer);
 	switch(object->type) {
@@ -2152,12 +3984,14 @@ extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer)
 		my_function = pack_acct_association_rec;
 		break;
 	case ACCT_ADD_QOS:
+	case ACCT_MODIFY_QOS:
 	case ACCT_REMOVE_QOS:
 		my_function = pack_acct_qos_rec;
 		break;
 	case ACCT_UPDATE_NOTSET:
 	default:
-		error("unknown type set in update_object: %d", object->type);
+		error("pack: unknown type set in update_object: %d",
+		      object->type);
 		return;
 	}
 	if(object->objects) 
@@ -2167,25 +4001,26 @@ extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer)
 	if(count && count != NO_VAL) {
 		itr = list_iterator_create(object->objects);
 		while((acct_object = list_next(itr))) {
-			(*(my_function))(acct_object, buffer);
+			(*(my_function))(acct_object, rpc_version, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
 }
 
-extern int unpack_acct_update_object(acct_update_object_t **object, Buf buffer)
+extern int unpack_acct_update_object(acct_update_object_t **object, 
+				     uint16_t rpc_version, Buf buffer)
 {
 	int i;
 	uint32_t count;
 	acct_update_object_t *object_ptr = 
 		xmalloc(sizeof(acct_update_object_t));
 	void *acct_object = NULL;
-	int (*my_function) (void **object, Buf buffer);
+	int (*my_function) (void **object, uint16_t rpc_version, Buf buffer);
 	void (*my_destroy) (void *object);
 
 	*object = object_ptr;
 
-	safe_unpack16((uint16_t *)&object_ptr->type, buffer);
+	safe_unpack16(&object_ptr->type, buffer);
 	switch(object_ptr->type) {
 	case ACCT_MODIFY_USER:
 	case ACCT_ADD_USER:
@@ -2202,13 +4037,14 @@ extern int unpack_acct_update_object(acct_update_object_t **object, Buf buffer)
 		my_destroy = destroy_acct_association_rec;
 		break;
 	case ACCT_ADD_QOS:
+	case ACCT_MODIFY_QOS:
 	case ACCT_REMOVE_QOS:
 		my_function = unpack_acct_qos_rec;
 		my_destroy = destroy_acct_qos_rec;
 		break;
 	case ACCT_UPDATE_NOTSET:
 	default:
-		error("unknown type set in update_object: %d",
+		error("unpack: unknown type set in update_object: %d",
 		      object_ptr->type);
 		goto unpack_error;
 	}
@@ -2216,7 +4052,7 @@ extern int unpack_acct_update_object(acct_update_object_t **object, Buf buffer)
 	if(count != NO_VAL) {
 		object_ptr->objects = list_create((*(my_destroy)));
 		for(i=0; i<count; i++) {
-			if(((*(my_function))(&acct_object, buffer))
+			if(((*(my_function))(&acct_object, rpc_version, buffer))
 			   == SLURM_ERROR)
 				goto unpack_error;
 			list_append(object_ptr->objects, acct_object);
@@ -2259,7 +4095,8 @@ extern uint32_t str_2_acct_qos(List qos_list, char *level)
 {
 	ListIterator itr = NULL;
 	acct_qos_rec_t *qos = NULL;
-	
+	char *working_level = NULL;
+
 	if(!qos_list) {
 		error("We need a qos list to translate");
 		return NO_VAL;
@@ -2267,11 +4104,15 @@ extern uint32_t str_2_acct_qos(List qos_list, char *level)
 		debug2("no level");
 		return 0;
 	}
-
+	if(level[0] == '+' || level[0] == '-')
+		working_level = level+1;
+	else
+		working_level = level;
 
 	itr = list_iterator_create(qos_list);
 	while((qos = list_next(itr))) {
-		if(!strncasecmp(level, qos->name, strlen(level)))
+		if(!strncasecmp(working_level, qos->name, 
+				strlen(working_level)))
 			break;
 	}
 	list_iterator_destroy(itr);
@@ -2319,41 +4160,208 @@ extern acct_admin_level_t str_2_acct_admin_level(char *level)
 	}	
 }
 
-extern void log_assoc_rec(acct_association_rec_t *assoc_ptr)
+/* IN/OUT: tree_list a list of acct_print_tree_t's */ 
+extern char *get_tree_acct_name(char *name, char *parent, char *cluster, 
+				List tree_list)
 {
-	debug2("association rec id          : %u", assoc_ptr->id);
-	debug2("  acct                      : %s", assoc_ptr->acct);
-	debug2("  cluster                   : %s", assoc_ptr->cluster);
-	if(assoc_ptr->fairshare == INFINITE)
-		debug2("  fairshare                 : NONE");
-	else
-		debug2("  fairshare                 : %u",
-		       assoc_ptr->fairshare);
-	if(assoc_ptr->max_cpu_secs_per_job == INFINITE)
-		debug2("  max_cpu_secs_per_job      : NONE");
-	else
-		debug2("  max_cpu_secs_per_job      : %d",
-		       assoc_ptr->max_cpu_secs_per_job);
-	if(assoc_ptr->max_jobs == INFINITE)
-		debug2("  max_jobs                  : NONE");
-	else
-		debug2("  max_jobs                  : %u", assoc_ptr->max_jobs);
-	if(assoc_ptr->max_nodes_per_job == INFINITE)
-		debug2("  max_nodes_per_job         : NONE");
-	else
-		debug2("  max_nodes_per_job         : %d",
-		       assoc_ptr->max_nodes_per_job);
-	if(assoc_ptr->max_wall_duration_per_job == INFINITE)
-		debug2("  max_wall_duration_per_job : NONE");
+	ListIterator itr = NULL;
+	acct_print_tree_t *acct_print_tree = NULL;
+	acct_print_tree_t *par_acct_print_tree = NULL;
+	static char *ret_name = NULL;
+	static char *last_name = NULL, *last_cluster = NULL;
+
+
+	if(!tree_list) 
+		return NULL;
+		
+	itr = list_iterator_create(tree_list);
+	while((acct_print_tree = list_next(itr))) {
+		if(!strcmp(name, acct_print_tree->name)) {
+			ret_name = acct_print_tree->print_name;
+			break;
+		} else if(parent && !strcmp(parent, acct_print_tree->name)) {
+			par_acct_print_tree = acct_print_tree;
+		}
+	}
+	list_iterator_destroy(itr);
+	
+	if(parent && acct_print_tree) 
+		return ret_name;
+
+	acct_print_tree = xmalloc(sizeof(acct_print_tree_t));
+	acct_print_tree->name = xstrdup(name);
+	if(par_acct_print_tree) 
+		acct_print_tree->spaces =
+			xstrdup_printf(" %s", par_acct_print_tree->spaces);
+	else 
+		acct_print_tree->spaces = xstrdup("");
+	
+	/* user account */
+	if(name[0] == '|')
+		acct_print_tree->print_name = xstrdup_printf(
+			"%s%s", acct_print_tree->spaces, parent);	
 	else
-		debug2("  max_wall_duration_per_job : %d", 
-		       assoc_ptr->max_wall_duration_per_job);
-	debug2("  parent_acct               : %s", assoc_ptr->parent_acct);
-	debug2("  partition                 : %s", assoc_ptr->partition);
-	debug2("  user                      : %s(%u)",
-	       assoc_ptr->user, assoc_ptr->uid);
-	debug2("  used_jobs                 : %u", assoc_ptr->used_jobs);
-	debug2("  used_share                : %u", assoc_ptr->used_share);
+		acct_print_tree->print_name = xstrdup_printf(
+			"%s%s", acct_print_tree->spaces, name);	
+	
+
+	list_append(tree_list, acct_print_tree);
+
+	ret_name = acct_print_tree->print_name;
+	last_name = name;
+	last_cluster = cluster;
+
+	return acct_print_tree->print_name;
+}
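/*
 * Illustrative sketch (not part of this patch): building indented display
 * names for an account hierarchy with get_tree_acct_name().  Each child is
 * printed one space deeper than its parent, and the caller owns tree_list,
 * which should be created with destroy_acct_print_tree() so entries are
 * freed.  The account names used here are hypothetical.
 */
static void _acct_tree_example(void)
{
	List tree_list = list_create(destroy_acct_print_tree);

	/* "root" has no parent; "physics" is a child of "root" */
	debug2("%s", get_tree_acct_name("root", NULL, NULL, tree_list));
	debug2("%s", get_tree_acct_name("physics", "root", NULL, tree_list));

	list_destroy(tree_list);
}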
+
+extern char *get_qos_complete_str(List qos_list, List num_qos_list)
+{
+	List temp_list = NULL;
+	char *temp_char = NULL;
+	char *print_this = NULL;
+	ListIterator itr = NULL;
+	int option = 0;
+
+	if(!qos_list || !list_count(qos_list)
+	   || !num_qos_list || !list_count(num_qos_list))
+		return xstrdup("");
+
+	temp_list = list_create(slurm_destroy_char);
+
+	itr = list_iterator_create(num_qos_list);
+	while((temp_char = list_next(itr))) {
+		option = 0;
+		if(temp_char[0] == '+' || temp_char[0] == '-') {
+			option = temp_char[0];
+			temp_char++;
+		}
+		temp_char = acct_qos_str(qos_list, atoi(temp_char));
+		if(temp_char) {
+			if(option) 
+				list_append(temp_list, xstrdup_printf(
+						    "%c%s", option, temp_char));
+			else 
+				list_append(temp_list, xstrdup(temp_char));
+		}
+	}
+	list_iterator_destroy(itr);
+	list_sort(temp_list, (ListCmpF)slurm_sort_char_list_asc);
+	itr = list_iterator_create(temp_list);
+	while((temp_char = list_next(itr))) {
+		if(print_this) 
+			xstrfmtcat(print_this, ",%s", temp_char);
+		else 
+			print_this = xstrdup(temp_char);
+	}
+	list_iterator_destroy(itr);
+	list_destroy(temp_list);
+
+	if(!print_this)
+		return xstrdup("");
+
+	return print_this;
+}
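/*
 * Illustrative sketch (not part of this patch): turning a list of numeric
 * qos ids (optionally prefixed with '+' or '-') into a sorted, comma
 * separated string of qos names with get_qos_complete_str().  qos_list is
 * assumed to already hold acct_qos_rec_t entries fetched from storage; the
 * ids appended here are hypothetical and the output depends on the names
 * in qos_list.
 */
static void _qos_str_example(List qos_list)
{
	List num_qos_list = list_create(slurm_destroy_char);
	char *qos_str = NULL;

	list_append(num_qos_list, xstrdup("1"));
	list_append(num_qos_list, xstrdup("+2"));

	qos_str = get_qos_complete_str(qos_list, num_qos_list);
	debug2("qos = %s", qos_str);

	xfree(qos_str);
	list_destroy(num_qos_list);
}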
+
+
+extern void log_assoc_rec(acct_association_rec_t *assoc_ptr, List qos_list)
+{
+	debug2("association rec id : %u", assoc_ptr->id);
+	debug2("  acct             : %s", assoc_ptr->acct);
+	debug2("  cluster          : %s", assoc_ptr->cluster);
+
+	if(assoc_ptr->fairshare == INFINITE)
+		debug2("  Fairshare        : NONE");
+	else if(assoc_ptr->fairshare != NO_VAL) 
+		debug2("  Fairshare        : %u", assoc_ptr->fairshare);
+
+	if(assoc_ptr->grp_cpu_mins == INFINITE)
+		debug2("  GrpCPUMins      : NONE");
+	else if(assoc_ptr->grp_cpu_mins != NO_VAL) 
+		debug2("  GrpCPUMins      : %llu", assoc_ptr->grp_cpu_mins);
+		
+	if(assoc_ptr->grp_cpus == INFINITE)
+		debug2("  GrpCPUs          : NONE");
+	else if(assoc_ptr->grp_cpus != NO_VAL) 
+		debug2("  GrpCPUs          : %u", assoc_ptr->grp_cpus);
+				
+	if(assoc_ptr->grp_jobs == INFINITE) 
+		debug2("  GrpJobs          : NONE");
+	else if(assoc_ptr->grp_jobs != NO_VAL) 
+		debug2("  GrpJobs          : %u", assoc_ptr->grp_jobs);
+		
+	if(assoc_ptr->grp_nodes == INFINITE)
+		debug2("  GrpNodes         : NONE");
+	else if(assoc_ptr->grp_nodes != NO_VAL)
+		debug2("  GrpNodes         : %u", assoc_ptr->grp_nodes);
+		
+	if(assoc_ptr->grp_submit_jobs == INFINITE) 
+		debug2("  GrpSubmitJobs    : NONE");
+	else if(assoc_ptr->grp_submit_jobs != NO_VAL) 
+		debug2("  GrpSubmitJobs    : %u", assoc_ptr->grp_submit_jobs);
+		
+	if(assoc_ptr->grp_wall == INFINITE) 
+		debug2("  GrpWall          : NONE");		
+	else if(assoc_ptr->grp_wall != NO_VAL) {
+		char time_buf[32];
+		mins2time_str((time_t) assoc_ptr->grp_wall, 
+			      time_buf, sizeof(time_buf));
+		debug2("  GrpWall          : %s", time_buf);
+	}
+
+	if(assoc_ptr->max_cpu_mins_pj == INFINITE)
+		debug2("  MaxCPUMins       : NONE");
+	else if(assoc_ptr->max_cpu_mins_pj != NO_VAL) 
+		debug2("  MaxCPUMins       : %llu", assoc_ptr->max_cpu_mins_pj);
+		
+	if(assoc_ptr->max_cpus_pj == INFINITE)
+		debug2("  MaxCPUs          : NONE");
+	else if(assoc_ptr->max_cpus_pj != NO_VAL) 
+		debug2("  MaxCPUs          : %u", assoc_ptr->max_cpus_pj);
+				
+	if(assoc_ptr->max_jobs == INFINITE) 
+		debug2("  MaxJobs          : NONE");
+	else if(assoc_ptr->max_jobs != NO_VAL) 
+		debug2("  MaxJobs          : %u", assoc_ptr->max_jobs);
+		
+	if(assoc_ptr->max_nodes_pj == INFINITE)
+		debug2("  MaxNodes         : NONE");
+	else if(assoc_ptr->max_nodes_pj != NO_VAL)
+		debug2("  MaxNodes         : %u", assoc_ptr->max_nodes_pj);
+		
+	if(assoc_ptr->max_submit_jobs == INFINITE) 
+		debug2("  MaxSubmitJobs    : NONE");
+	else if(assoc_ptr->max_submit_jobs != NO_VAL) 
+		debug2("  MaxSubmitJobs    : %u", assoc_ptr->max_submit_jobs);
+		
+	if(assoc_ptr->max_wall_pj == INFINITE) 
+		debug2("  MaxWall          : NONE");		
+	else if(assoc_ptr->max_wall_pj != NO_VAL) {
+		char time_buf[32];
+		mins2time_str((time_t) assoc_ptr->max_wall_pj, 
+			      time_buf, sizeof(time_buf));
+		debug2("  MaxWall          : %s", time_buf);
+	}
+
+	if(assoc_ptr->qos_list) {
+		char *temp_char = get_qos_complete_str(qos_list,
+						       assoc_ptr->qos_list);
+		if(temp_char) {		
+			debug2("  Qos              : %s", temp_char);
+			xfree(temp_char);
+		}
+	} else {
+		debug2("  Qos              : %s", "Normal");
+	}
+
+	if(assoc_ptr->parent_acct)
+		debug2("  parent_acct      : %s", assoc_ptr->parent_acct);
+	if(assoc_ptr->partition)
+		debug2("  partition        : %s", assoc_ptr->partition);
+	if(assoc_ptr->user)
+		debug2("  user             : %s(%u)",
+		       assoc_ptr->user, assoc_ptr->uid);
+	debug2("  used_jobs        : %u", assoc_ptr->used_jobs);
+	debug2("  used_shares      : %u", assoc_ptr->used_shares);
 }
 
 /*
@@ -2408,11 +4416,13 @@ extern int slurm_acct_storage_fini(void)
 	return rc;
 }
 
-extern void *acct_storage_g_get_connection(bool make_agent, bool rollback)
+extern void *acct_storage_g_get_connection(bool make_agent, int conn_num,
+					   bool rollback)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return NULL;
-	return (*(g_acct_storage_context->ops.get_conn))(make_agent, rollback);
+	return (*(g_acct_storage_context->ops.get_conn))(
+		make_agent, conn_num, rollback);
 }
 
 extern int acct_storage_g_close_connection(void **db_conn)
@@ -2526,6 +4536,16 @@ extern List acct_storage_g_modify_associations(
 		(db_conn, uid, assoc_cond, assoc);
 }
 
+extern List acct_storage_g_modify_qos(void *db_conn, uint32_t uid,
+				      acct_qos_cond_t *qos_cond,
+				      acct_qos_rec_t *qos)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return NULL;
+	return (*(g_acct_storage_context->ops.modify_qos))
+		(db_conn, uid, qos_cond, qos);
+}
+
 extern List acct_storage_g_remove_users(void *db_conn, uint32_t uid,
 					acct_user_cond_t *user_cond)
 {
@@ -2708,12 +4728,13 @@ extern int clusteracct_storage_g_register_ctld(char *cluster, uint16_t port)
 /* 
  * load into the storage the start of a job
  */
-extern int jobacct_storage_g_job_start (void *db_conn,
+extern int jobacct_storage_g_job_start (void *db_conn, char *cluster_name,
 					struct job_record *job_ptr) 
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
-	return (*(g_acct_storage_context->ops.job_start))(db_conn, job_ptr);
+	return (*(g_acct_storage_context->ops.job_start))(
+		db_conn, cluster_name, job_ptr);
 }
 
 /* 
diff --git a/src/common/slurm_accounting_storage.h b/src/common/slurm_accounting_storage.h
index e06892a12..dfc7d8fa4 100644
--- a/src/common/slurm_accounting_storage.h
+++ b/src/common/slurm_accounting_storage.h
@@ -64,30 +64,47 @@ typedef enum {
 	ACCT_REMOVE_ASSOC,
 	ACCT_REMOVE_COORD,
 	ACCT_ADD_QOS,
-	ACCT_REMOVE_QOS
+	ACCT_REMOVE_QOS,
+	ACCT_MODIFY_QOS,
 } acct_update_type_t;
 
 /* Association conditions used for queries of the database */
 typedef struct {
 	List acct_list;		/* list of char * */
 	List cluster_list;	/* list of char * */
-	uint32_t fairshare;	/* fairshare number */
+
+	List fairshare_list;	/* fairshare number */
+
+	List grp_cpu_mins_list; /* list of char * */
+	List grp_cpus_list; /* list of char * */
+	List grp_jobs_list;	/* list of char * */
+	List grp_nodes_list; /* list of char * */
+	List grp_submit_jobs_list; /* list of char * */
+	List grp_wall_list; /* list of char * */
+
 	List id_list;		/* list of char */
-	uint32_t max_cpu_secs_per_job; /* max number of cpu seconds this 
-					* association can have per job */
-	uint32_t max_jobs;	/* max number of jobs this association can run
-				 * at one time */
-	uint32_t max_nodes_per_job; /* max number of nodes this
-				     * association can allocate per job */
-	uint32_t max_wall_duration_per_job; /* longest time this association
-					     * can run a job (seconds) */
+
+	List max_cpu_mins_pj_list; /* list of char * */
+	List max_cpus_pj_list; /* list of char * */
+	List max_jobs_list;	/* list of char * */
+	List max_nodes_pj_list; /* list of char * */
+	List max_submit_jobs_list; /* list of char * */
+	List max_wall_pj_list; /* list of char * */
+
 	List partition_list;	/* list of char * */
-	char *parent_acct;	/* name of parent account */
+	List parent_acct_list;	/* name of parent account */
+
+	List qos_list; /* list of char * */	
+
 	uint32_t usage_end; 
 	uint32_t usage_start; 
+
 	List user_list;		/* list of char * */
+
 	uint16_t with_usage;  /* fill in usage */
 	uint16_t with_deleted; /* return deleted associations */
+	uint16_t with_raw_qos; /* return a raw qos or delta_qos */
+	uint16_t with_sub_accts; /* return sub acct information also */
 	uint16_t without_parent_info; /* don't give me parent id/name */
 	uint16_t without_parent_limits; /* don't give me limits from
 					 * parents */
@@ -98,7 +115,6 @@ typedef struct {
 						names */
 	List description_list; /* list of char * */
 	List organization_list; /* list of char * */
-	List qos_list; /* list of char * */	
 	uint16_t with_assocs; 
 	uint16_t with_coords; 
 	uint16_t with_deleted; 
@@ -110,7 +126,6 @@ typedef struct {
 	char *description;
 	char *name;
 	char *organization;
-	List qos_list /* list of char *'s */;
 } acct_account_rec_t;
 
 typedef struct {
@@ -122,33 +137,86 @@ typedef struct {
 typedef struct acct_association_rec {
 	List accounting_list; 	/* list of acct_accounting_rec_t *'s */
 	char *acct;		/* account/project associated to association */
-	char *cluster;		/* cluster associated to association */
+	char *cluster;		/* cluster associated to association
+				 * */
+
 	uint32_t fairshare;	/* fairshare number */
+
+	uint64_t grp_cpu_mins; /* max number of cpu hours the
+				     * underlying group of
+				     * associations can run for */
+	uint32_t grp_cpus; /* max number of cpus the
+				* underlying group of 
+				* associations can allocate at one time */
+	uint32_t grp_jobs;	/* max number of jobs the
+				 * underlying group of associations can run
+				 * at one time */
+	uint32_t grp_nodes; /* max number of nodes the
+				 * underlying group of
+				 * associations can allocate at once */
+	uint32_t grp_submit_jobs; /* max number of jobs the
+				       * underlying group of
+				       * associations can submit at
+				       * one time */
+	uint32_t grp_wall; /* total time in hours the 
+			    * underlying group of
+			    * associations can run for */
+
+	uint32_t grp_used_cpu_mins; /* cpu hours the
+				      * underlying group of
+				      * associations has run for 
+				      * (DON'T PACK) */
+	uint32_t grp_used_cpus; /* count of active jobs in the group
+				 * (DON'T PACK) */
+	uint32_t grp_used_nodes; /* count of active jobs in the group
+				  * (DON'T PACK) */
+	uint32_t grp_used_wall; /* group count of time used in
+				     * running jobs (DON'T PACK) */
+	
 	uint32_t id;		/* id identifing a combination of
 				 * user-account-cluster(-partition) */
+
+	uint32_t level_shares;  /* number of shares on this level of
+				 * the tree (DON'T PACK) */
+	
 	uint32_t lft;		/* lft used for grouping sub
 				 * associations and jobs as a left
 				 * most container used with rgt */
-	uint32_t max_cpu_secs_per_job; /* max number of cpu seconds this 
-					   * association can have per job */
+	
+	uint64_t max_cpu_mins_pj; /* max number of cpu seconds this 
+				   * association can have per job */
+	uint32_t max_cpus_pj; /* max number of cpus this 
+				    * association can allocate per job */
 	uint32_t max_jobs;	/* max number of jobs this association can run
 				 * at one time */
-	uint32_t max_nodes_per_job; /* max number of nodes this
+	uint32_t max_nodes_pj; /* max number of nodes this
 				     * association can allocate per job */
-	uint32_t max_wall_duration_per_job; /* longest time this
-					     * association can run a job */
+	uint32_t max_submit_jobs; /* max number of jobs that can be
+				     submitted by association */
+	uint32_t max_wall_pj; /* longest time this
+			       * association can run a job */
+	
 	char *parent_acct;	/* name of parent account */
-	struct acct_association_rec *parent_acct_ptr;	/* ptr to parent acct
-							 * set in slurmctld */
+	struct acct_association_rec *parent_assoc_ptr;	/* ptr to parent acct
+							 * set in
+							 * slurmctld 
+							 * (DON'T PACK) */
 	uint32_t parent_id;	/* id of parent account */
 	char *partition;	/* optional partition in a cluster 
 				 * associated to association */
+
+	List qos_list;          /* list of char * */
+
 	uint32_t rgt;		/* rgt used for grouping sub
 				 * associations and jobs as a right
 				 * most container used with lft */
 	uint32_t uid;		/* user ID */
-	uint32_t used_jobs;	/* count of active jobs */
-	uint32_t used_share;	/* measure of resource usage */
+	
+	uint32_t used_jobs;	/* count of active jobs (DON'T PACK) */
+	uint32_t used_shares;	/* measure of resource usage */
+	uint32_t used_submit_jobs; /* count of jobs pending or running
+				    * (DON'T PACK) */
+	
 	char *user;		/* user associated to association */
 } acct_association_rec_t;
 
@@ -164,18 +232,12 @@ typedef struct {
 	List accounting_list; /* list of cluster_accounting_rec_t *'s */
 	char *control_host;
 	uint32_t control_port;
-	uint32_t default_fairshare;	/* fairshare number */
-	uint32_t default_max_cpu_secs_per_job;/* max number of cpu seconds this 
-					       * association can have per job */
-	uint32_t default_max_jobs;/* max number of jobs this association can run
-				   * at one time */
-	uint32_t default_max_nodes_per_job; /* max number of nodes this
-					     * association can
-					     * allocate per job */
-	uint32_t default_max_wall_duration_per_job; /* longest time this
-					     * association can run a job */
 	char *name;
 
+	List valid_qos_list;
+	acct_association_rec_t *root_assoc; /* root association for cluster */
+
+	uint16_t rpc_version; /* version of rpc this cluster is running */
 } acct_cluster_rec_t;
 
 typedef struct {
@@ -201,7 +263,56 @@ typedef struct {
 typedef struct {
 	char *description;
 	uint32_t id;
+	char *job_flags;
+	List job_list; /* list of job pointers to submitted/running
+			  jobs (DON'T PACK) */
+
+	uint64_t grp_cpu_mins; /* max number of cpu hours the
+				     * underlying group of
+				     * associations can run for */
+	uint32_t grp_cpus; /* max number of cpus this qos
+			      can allocate at one time */
+	uint32_t grp_jobs;	/* max number of jobs this qos can run
+				 * at one time */
+	uint32_t grp_nodes; /* max number of nodes this qos 
+			       can allocate at once */
+	uint32_t grp_submit_jobs; /* max number of jobs this qos can submit at
+				   * one time */
+	uint32_t grp_wall; /* total time in hours this qos can run for */
+
+	uint32_t grp_used_cpu_mins; /* cpu hours this qos has run for 
+				      * (DON'T PACK) */
+	uint32_t grp_used_cpus; /* count of cpus in use in this qos
+				 * (DON'T PACK) */
+	uint32_t grp_used_jobs;	/* count of active jobs (DON'T PACK) */
+	uint32_t grp_used_nodes; /* count of nodes in use in this qos
+				  * (DON'T PACK) */
+	uint32_t grp_used_submit_jobs; /* count of jobs pending or running
+				    * (DON'T PACK) */
+	uint32_t grp_used_wall; /* group count of time (minutes) used in
+				 * running jobs (DON'T PACK) */
+
+	uint64_t max_cpu_mins_pu; /* max number of cpu mins a user can
+				   * use with this qos */
+	uint32_t max_cpus_pu; /* max number of cpus a user can
+			       * allocate with this qos */
+	uint32_t max_jobs_pu;	/* max number of jobs a user can
+				 * run with this qos at one time */
+	uint32_t max_nodes_pu; /* max number of nodes a user can
+				* allocate with this qos at one time */
+	uint32_t max_submit_jobs_pu; /* max number of jobs a user can
+				     submit with this qos at once */
+	uint32_t max_wall_pu; /* longest time this
+			       * qos can run a job */
+
 	char *name;
+	List preemptee_list; /* list of char * list of qos's that this
+				qos can preempt */
+	List preemptor_list; /* list of char * list of qos's that this
+			      * qos is preempted by */
+	uint32_t priority;  /* ranged int needs to be a uint for
+			     * heterogeneous systems */
+	List user_limit_list; /* acct_used_limits_t's */
 } acct_qos_rec_t;
 
 typedef struct {
@@ -211,12 +322,24 @@ typedef struct {
 	uint16_t with_deleted; 
 } acct_qos_cond_t;
 
+/* Right now this is used in the acct_qos_rec_t structure.  In the
+ * user_limit_list. */
 typedef struct {
-	acct_admin_level_t admin_level;
+ 	uint64_t cpu_mins;	/* count of cpu mins used */
+ 	uint32_t cpus;	/* count of cpus in use */
+	uint32_t jobs;	/* count of active jobs */
+	uint32_t nodes;	/* count of nodes in use */
+	uint32_t submit_jobs; /* count of jobs pending or running */
+	uint32_t wall; /* how much time this user has used */
+	uint32_t uid;
+} acct_used_limits_t;
+
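/*
 * Illustrative sketch (not part of this patch): consulting a qos's
 * per-user counters.  It assumes user_limit_list holds one
 * acct_used_limits_t per uid with activity under the qos; the comparison
 * against max_submit_jobs_pu is only an example of how such counters might
 * be used, and _user_over_submit_limit() is a hypothetical helper.
 */
static int _user_over_submit_limit(acct_qos_rec_t *qos, uint32_t uid)
{
	ListIterator itr = NULL;
	acct_used_limits_t *used = NULL;
	int over = 0;

	if (!qos->user_limit_list)
		return 0;

	itr = list_iterator_create(qos->user_limit_list);
	while ((used = list_next(itr))) {
		if (used->uid != uid)
			continue;
		if ((qos->max_submit_jobs_pu != INFINITE) &&
		    (qos->max_submit_jobs_pu != NO_VAL) &&
		    (used->submit_jobs >= qos->max_submit_jobs_pu))
			over = 1;
		break;
	}
	list_iterator_destroy(itr);
	return over;
}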
+typedef struct {
+	uint16_t admin_level; /* really acct_admin_level_t but for
+				 packing purposes needs to be uint16_t */
 	acct_association_cond_t *assoc_cond; /* use user_list here for
 						names */
 	List def_acct_list; /* list of char * */
-	List qos_list; 	/* list of char * */
 	uint16_t with_assocs; 
 	uint16_t with_coords; 
 	uint16_t with_deleted; 
@@ -228,35 +351,46 @@ typedef struct {
  * src/slurmdbd/proc_req.c.
  */
 typedef struct {
-	acct_admin_level_t admin_level;
+	uint16_t admin_level; /* really acct_admin_level_t but for
+				 packing purposes needs to be uint16_t */
 	List assoc_list; /* list of acct_association_rec_t *'s */
 	List coord_accts; /* list of acct_coord_rec_t *'s */
 	char *default_acct;
 	char *name;
-	List qos_list; /* list of char * */
 	uint32_t uid;
 } acct_user_rec_t;
 
 typedef struct {
+	List acct_list; /* list of char * */
 	List action_list; /* list of char * */
 	List actor_list; /* list of char * */
+	List cluster_list; /* list of char * */
 	List id_list; /* list of char * */
+	List info_list; /* list of char * */
+	List name_list; /* list of char * */
 	uint32_t time_end; 
 	uint32_t time_start; 
+	List user_list; /* list of char * */
+	uint16_t with_assoc_info;
 } acct_txn_cond_t;
 
 typedef struct {
+	char *accts;
 	uint16_t action;
 	char *actor_name;
+	char *clusters;
 	uint32_t id;
 	char *set_info;
 	time_t timestamp;
+	char *users;
 	char *where_query;
 } acct_txn_rec_t;
 
 typedef struct {
 	List objects; /* depending on type */ 
-	acct_update_type_t type;
+	uint16_t type; /* really acct_update_type_t but for
+				  * packing purposes needs to be a
+				  * uint16_t */
 } acct_update_object_t;
 
 typedef struct {
@@ -274,6 +408,13 @@ typedef struct {
 	uint64_t resv_secs; /* number of cpu seconds reserved */	
 } cluster_accounting_rec_t;
 
+
+typedef struct {
+	char *name;
+	char *print_name;
+	char *spaces;
+} acct_print_tree_t;
+
 extern void destroy_acct_user_rec(void *object);
 extern void destroy_acct_account_rec(void *object);
 extern void destroy_acct_coord_rec(void *object);
@@ -293,56 +434,92 @@ extern void destroy_acct_qos_cond(void *object);
 extern void destroy_acct_txn_cond(void *object);
 
 extern void destroy_acct_update_object(void *object);
+extern void destroy_acct_used_limits(void *object);
 extern void destroy_update_shares_rec(void *object);
+extern void destroy_acct_print_tree(void *object);
 
+extern void init_acct_association_rec(acct_association_rec_t *assoc);
+extern void init_acct_qos_rec(acct_qos_rec_t *qos);
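/*
 * Illustrative sketch (not part of this patch): the limit fields in these
 * records use NO_VAL for "not set" and INFINITE for "no limit", as
 * log_assoc_rec() in slurm_accounting_storage.c shows.  The bodies of
 * init_acct_association_rec() and init_acct_qos_rec() are not included in
 * this patch, so assuming they preset every limit to NO_VAL is only an
 * assumption; the sketch shows how a caller might rely on the convention.
 */
static void _limit_convention_example(acct_association_rec_t *assoc)
{
	if (assoc->max_jobs == NO_VAL)
		debug2("max_jobs never set for association %u", assoc->id);
	else if (assoc->max_jobs == INFINITE)
		debug2("association %u may run unlimited jobs", assoc->id);
	else
		debug2("association %u may run at most %u jobs",
		       assoc->id, assoc->max_jobs);
}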
 
 /* pack functions */
-extern void pack_acct_user_rec(void *in, Buf buffer);
-extern int unpack_acct_user_rec(void **object, Buf buffer);
-extern void pack_acct_account_rec(void *in, Buf buffer);
-extern int unpack_acct_account_rec(void **object, Buf buffer);
-extern void pack_acct_coord_rec(void *in, Buf buffer);
-extern int unpack_acct_coord_rec(void **object, Buf buffer);
-extern void pack_cluster_accounting_rec(void *in, Buf buffer);
-extern int unpack_cluster_accounting_rec(void **object, Buf buffer);
-extern void pack_acct_cluster_rec(void *in, Buf buffer);
-extern int unpack_acct_cluster_rec(void **object, Buf buffer);
-extern void pack_acct_accounting_rec(void *in, Buf buffer);
-extern int unpack_acct_accounting_rec(void **object, Buf buffer);
-extern void pack_acct_association_rec(void *in, Buf buffer);
-extern int unpack_acct_association_rec(void **object, Buf buffer);
-extern void pack_acct_qos_rec(void *in, Buf buffer);
-extern int unpack_acct_qos_rec(void **object, Buf buffer);
-extern void pack_acct_txn_rec(void *in, Buf buffer);
-extern int unpack_acct_txn_rec(void **object, Buf buffer);
-
-extern void pack_acct_user_cond(void *in, Buf buffer);
-extern int unpack_acct_user_cond(void **object, Buf buffer);
-extern void pack_acct_account_cond(void *in, Buf buffer);
-extern int unpack_acct_account_cond(void **object, Buf buffer);
-extern void pack_acct_cluster_cond(void *in, Buf buffer);
-extern int unpack_acct_cluster_cond(void **object, Buf buffer);
-extern void pack_acct_association_cond(void *in, Buf buffer);
-extern int unpack_acct_association_cond(void **object, Buf buffer);
-extern void pack_acct_job_cond(void *in, Buf buffer);
-extern int unpack_acct_job_cond(void **object, Buf buffer);
-extern void pack_acct_qos_cond(void *in, Buf buffer);
-extern int unpack_acct_qos_cond(void **object, Buf buffer);
-extern void pack_acct_txn_cond(void *in, Buf buffer);
-extern int unpack_acct_txn_cond(void **object, Buf buffer);
-
-extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer);
-extern int unpack_acct_update_object(acct_update_object_t **object, Buf buffer);
-
-extern void pack_update_shares_used(void *in, Buf buffer);
-extern int unpack_update_shares_used(void **object, Buf buffer);
+extern void pack_acct_user_rec(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_user_rec(void **object, uint16_t rpc_version, 
+				Buf buffer);
+extern void pack_acct_account_rec(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_account_rec(void **object, uint16_t rpc_version, 
+				   Buf buffer);
+extern void pack_acct_coord_rec(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_coord_rec(void **object, uint16_t rpc_version,
+				 Buf buffer);
+extern void pack_cluster_accounting_rec(void *in, uint16_t rpc_version, 
+					Buf buffer);
+extern int unpack_cluster_accounting_rec(void **object, uint16_t rpc_version,
+					 Buf buffer);
+extern void pack_acct_cluster_rec(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_cluster_rec(void **object, uint16_t rpc_version,
+				   Buf buffer);
+extern void pack_acct_accounting_rec(void *in, uint16_t rpc_version,
+				     Buf buffer);
+extern int unpack_acct_accounting_rec(void **object, uint16_t rpc_version, 
+				      Buf buffer);
+extern void pack_acct_association_rec(void *in, uint16_t rpc_version, 
+				      Buf buffer);
+extern int unpack_acct_association_rec(void **object, uint16_t rpc_version,
+				       Buf buffer);
+extern void pack_acct_qos_rec(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer);
+extern void pack_acct_txn_rec(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_txn_rec(void **object, uint16_t rpc_version, Buf buffer);
+
+extern void pack_acct_user_cond(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_user_cond(void **object, uint16_t rpc_version,
+				 Buf buffer);
+extern void pack_acct_account_cond(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_account_cond(void **object, uint16_t rpc_version,
+				    Buf buffer);
+extern void pack_acct_cluster_cond(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_cluster_cond(void **object, uint16_t rpc_version, 
+				    Buf buffer);
+extern void pack_acct_association_cond(void *in, uint16_t rpc_version,
+				       Buf buffer);
+extern int unpack_acct_association_cond(void **object, uint16_t rpc_version, 
+					Buf buffer);
+extern void pack_acct_job_cond(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_job_cond(void **object, uint16_t rpc_version,
+				Buf buffer);
+extern void pack_acct_qos_cond(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_qos_cond(void **object, uint16_t rpc_version,
+				Buf buffer);
+extern void pack_acct_txn_cond(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_txn_cond(void **object, uint16_t rpc_version,
+				Buf buffer);
+
+extern void pack_acct_update_object(acct_update_object_t *object, 
+				    uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_update_object(acct_update_object_t **object,
+				     uint16_t rpc_version, Buf buffer);
+
+extern void pack_acct_used_limits(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_acct_used_limits(void **object,
+				   uint16_t rpc_version, Buf buffer);
+
+extern void pack_update_shares_used(void *in, uint16_t rpc_version,
+				    Buf buffer);
+extern int unpack_update_shares_used(void **object, uint16_t rpc_version, 
+				     Buf buffer);
 
 extern char *acct_qos_str(List qos_list, uint32_t level);
 extern uint32_t str_2_acct_qos(List qos_list, char *level);
 extern char *acct_admin_level_str(acct_admin_level_t level);
 extern acct_admin_level_t str_2_acct_admin_level(char *level);
 
-extern void log_assoc_rec(acct_association_rec_t *assoc_ptr);
+/* IN/OUT: tree_list a list of acct_print_tree_t's */ 
+extern char *get_tree_acct_name(char *name, char *parent, char *cluster, 
+				List tree_list);
+
+extern char *get_qos_complete_str(List qos_list, List num_qos_list);
+
+extern void log_assoc_rec(acct_association_rec_t *assoc_ptr, List qos_list);
 
 extern int slurm_acct_storage_init(char *loc); /* load the plugin */
 extern int slurm_acct_storage_fini(void); /* unload the plugin */
@@ -350,10 +527,13 @@ extern int slurm_acct_storage_fini(void); /* unload the plugin */
 /*
  * get a new connection to the storage unit
  * IN: make_agent - Make an agent to manage queued requests
+ * IN: conn_num - If running more than one connection to the database
+ *     this can be used to tell which connection is doing what
  * IN: rollback - maintain journal of changes to permit rollback
  * RET: pointer used to access db 
  */
-extern void *acct_storage_g_get_connection(bool make_agent, bool rollback);
+extern void *acct_storage_g_get_connection(bool make_agent, int conn_num,
+					   bool rollback);
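/*
 * Illustrative sketch (not part of this patch): opening and closing a
 * storage connection with the new conn_num argument.  The connection
 * number (7 here) is arbitrary and only distinguishes this connection in
 * the plugin's own bookkeeping; rollback behaviour depends on the storage
 * plugin in use.
 */
static void _connection_example(void)
{
	void *db_conn = acct_storage_g_get_connection(false, 7, true);

	if (!db_conn)
		return;
	/* ... issue get/modify calls against db_conn here ... */
	acct_storage_g_close_connection(&db_conn);
}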
 
 /*
  * release connection to the storage unit
@@ -463,6 +643,16 @@ extern List acct_storage_g_modify_associations(
 	acct_association_cond_t *assoc_cond,
 	acct_association_rec_t *assoc);
 
+/* 
+ * modify existing qos in the accounting system 
+ * IN:  acct_qos_cond_t *qos_cond
+ * IN:  acct_qos_rec_t *qos
+ * RET: List containing (char *'s) else NULL on error
+ */
+extern List acct_storage_g_modify_qos(void *db_conn, uint32_t uid, 
+				      acct_qos_cond_t *qos_cond,
+				      acct_qos_rec_t *qos);
+
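/*
 * Illustrative sketch (not part of this patch): raising the priority of an
 * existing qos through acct_storage_g_modify_qos().  It assumes
 * acct_qos_cond_t exposes a name_list of qos names, as the other *_cond
 * structures here do; the qos name and priority value are hypothetical.
 * Per the comment above, the returned List holds char *'s naming what was
 * changed and must be destroyed by the caller.
 */
static void _modify_qos_example(void *db_conn, uint32_t uid)
{
	acct_qos_cond_t qos_cond;
	acct_qos_rec_t qos;
	List ret_list = NULL;

	memset(&qos_cond, 0, sizeof(acct_qos_cond_t));
	memset(&qos, 0, sizeof(acct_qos_rec_t));

	qos_cond.name_list = list_create(slurm_destroy_char);
	list_append(qos_cond.name_list, xstrdup("expedite"));
	qos.priority = 10;

	ret_list = acct_storage_g_modify_qos(db_conn, uid, &qos_cond, &qos);
	if (ret_list)
		list_destroy(ret_list);
	list_destroy(qos_cond.name_list);
}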
 /* 
  * remove users from accounting system 
  * IN:  acct_user_cond_t *user_cond
@@ -640,7 +830,7 @@ extern int clusteracct_storage_g_get_usage(
 /* 
  * load into the storage the start of a job
  */
-extern int jobacct_storage_g_job_start (void *db_conn, 
+extern int jobacct_storage_g_job_start (void *db_conn, char *cluster_name,
 					struct job_record *job_ptr);
 
 /* 
diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index b8ee3417b..0cdab103d 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -218,7 +218,7 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "The node configuration changes that were made require restart "
 	  "of the slurmctld daemon to take effect"},
 	{ ESLURM_ACCOUNTING_POLICY,
-	  "Job violates accounting policy (the user's size and/or time limits)"},
+	  "Job violates accounting policy (job submit limit, the user's size and/or time limits)"},
 	{ ESLURM_INVALID_TIME_LIMIT,
 	  "Requested time limit exceeds partition limit"	},
 
diff --git a/src/common/slurm_jobcomp.c b/src/common/slurm_jobcomp.c
index 3cdba9b90..18c0c1c19 100644
--- a/src/common/slurm_jobcomp.c
+++ b/src/common/slurm_jobcomp.c
@@ -219,14 +219,12 @@ jobcomp_destroy_job(void *object)
 		xfree(job->jobname);
 		xfree(job->state);
 		xfree(job->timelimit);
-#ifdef HAVE_BG
 		xfree(job->blockid);
 		xfree(job->connection);
 		xfree(job->reboot);
 		xfree(job->rotate);
 		xfree(job->geo);
 		xfree(job->bg_start_point);
-#endif
 		xfree(job);
 	}
 }
diff --git a/src/common/slurm_jobcomp.h b/src/common/slurm_jobcomp.h
index b99b7b418..b1dab1f6d 100644
--- a/src/common/slurm_jobcomp.h
+++ b/src/common/slurm_jobcomp.h
@@ -68,7 +68,6 @@ typedef struct {
 	char *jobname;
 	char *state;
 	char *timelimit;
-#ifdef HAVE_BG
 	char *blockid;
 	char *connection;
 	char *reboot;
@@ -76,7 +75,6 @@ typedef struct {
 	uint32_t max_procs;
 	char *geo;
 	char *bg_start_point;
-#endif
 } jobcomp_job_rec_t;
 
 typedef struct slurm_jobcomp_context * slurm_jobcomp_context_t;
diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c
index a2d295e31..c5845959a 100644
--- a/src/common/slurm_protocol_api.c
+++ b/src/common/slurm_protocol_api.c
@@ -612,7 +612,7 @@ char *slurm_get_accounting_storage_loc(void)
 }
 
 /* slurm_get_accounting_storage_enforce
- * returns whether or not to enforce associations
+ * returns what level to enforce associations at
  */
 int slurm_get_accounting_storage_enforce(void)
 {
@@ -629,6 +629,29 @@ int slurm_get_accounting_storage_enforce(void)
 
 }
 
+/* slurm_get_is_association_based_accounting
+ * returns if we are doing accounting by associations
+ */
+int slurm_get_is_association_based_accounting(void)
+{
+	int enforce = 0;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+		return 1;
+	} else {
+		conf = slurm_conf_lock();
+		if(!strcasecmp(conf->accounting_storage_type, 
+			      "accounting_storage/slurmdbd")
+		   || !strcasecmp(conf->accounting_storage_type,
+				 "accounting_storage/mysql")) 
+			enforce = 1;
+		slurm_conf_unlock();
+	}
+	return enforce;	
+
+}
+
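/*
 * Illustrative sketch (not part of this patch): gating association
 * handling on the storage plugin in use.  Code that only makes sense when
 * accounting is association based can guard itself with the new helper;
 * _maybe_setup_assoc_cache() is a hypothetical function name.
 */
static void _maybe_setup_assoc_cache(void)
{
	if (!slurm_get_is_association_based_accounting())
		return;		/* flat job accounting only */
	/* ... build association state here ... */
}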
 /* slurm_set_accounting_storage_loc
  * IN: char *loc (name of file or database)
  * RET 0 or error code
@@ -2303,6 +2326,7 @@ int slurm_send_rc_msg(slurm_msg_t *msg, int rc)
 	resp_msg.address  = msg->address;
 	resp_msg.msg_type = RESPONSE_SLURM_RC;
 	resp_msg.data     = &rc_msg;
+	resp_msg.flags = msg->flags;
 	resp_msg.forward = msg->forward;
 	resp_msg.forward_struct = msg->forward_struct;
 	resp_msg.ret_list = msg->ret_list;
diff --git a/src/common/slurm_protocol_api.h b/src/common/slurm_protocol_api.h
index 566900fc3..969d14f95 100644
--- a/src/common/slurm_protocol_api.h
+++ b/src/common/slurm_protocol_api.h
@@ -245,17 +245,22 @@ char *slurm_get_accounting_storage_user(void);
  */
 char *slurm_get_accounting_storage_host(void);
 
+/* slurm_get_accounting_storage_enforce
+ * returns what level to enforce associations at
+ */
+int slurm_get_accounting_storage_enforce(void);
+
+/* slurm_get_is_association_based_accounting
+ * returns if we are doing accounting by associations
+ */
+int slurm_get_is_association_based_accounting(void);
+
 /* slurm_get_accounting_storage_pass
  * returns the storage password from slurmctld_conf object
  * RET char *    - storage location,  MUST be xfreed by caller
  */
 char *slurm_get_accounting_storage_loc(void);
 
-/* slurm_get_accounting_storage_enforce
- * returns whether or not to enforce associations
- */
-int slurm_get_accounting_storage_enforce(void);
-
 /* slurm_set_accounting_storage_loc
  * IN: char *loc (name of file or database)
  * RET 0 or error code
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index 4a63d4277..e71d6b637 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -79,7 +79,7 @@ extern void slurm_msg_t_init(slurm_msg_t *msg)
 {
 	memset(msg, 0, sizeof(slurm_msg_t));
 
-	msg->msg_type = (slurm_msg_type_t)NO_VAL;
+	msg->msg_type = (uint16_t)NO_VAL;
 	msg->conn_fd = -1;
 
 	forward_init(&msg->forward, NULL);
@@ -141,24 +141,23 @@ extern int slurm_addto_char_list(List char_list, char *names)
 			else if (names[i] == '\"' || names[i] == '\'')
 				names[i] = '`';
 			else if(names[i] == ',') {
-				if((i-start) > 0) {
-					name = xmalloc((i-start+1));
-					memcpy(name, names+start, (i-start));
-					//info("got %s %d", name, i-start);
-
-					while((tmp_char = list_next(itr))) {
-						if(!strcasecmp(tmp_char, name))
-							break;
-					}
-
-					if(!tmp_char) {
-						_make_lower(name);
-						list_append(char_list, name);
-						count++;
-					} else 
-						xfree(name);
-					list_iterator_reset(itr);
+				name = xmalloc((i-start+1));
+				memcpy(name, names+start, (i-start));
+				//info("got %s %d", name, i-start);
+				
+				while((tmp_char = list_next(itr))) {
+					if(!strcasecmp(tmp_char, name))
+						break;
 				}
+				
+				if(!tmp_char) {
+					_make_lower(name);
+					list_append(char_list, name);
+					count++;
+				} else 
+					xfree(name);
+				list_iterator_reset(itr);
+				
 				i++;
 				start = i;
 				if(!names[i]) {
@@ -170,26 +169,49 @@ extern int slurm_addto_char_list(List char_list, char *names)
 			}
 			i++;
 		}
-		if((i-start) > 0) {
-			name = xmalloc((i-start)+1);
-			memcpy(name, names+start, (i-start));
-			while((tmp_char = list_next(itr))) {
-				if(!strcasecmp(tmp_char, name))
-					break;
-			}
-			
-			if(!tmp_char) {
-				_make_lower(name);
-				list_append(char_list, name);
-				count++;
-			} else 
-				xfree(name);
+
+		name = xmalloc((i-start)+1);
+		memcpy(name, names+start, (i-start));
+		while((tmp_char = list_next(itr))) {
+			if(!strcasecmp(tmp_char, name))
+				break;
 		}
+		
+		if(!tmp_char) {
+			_make_lower(name);
+			list_append(char_list, name);
+			count++;
+		} else 
+			xfree(name);
 	}	
 	list_iterator_destroy(itr);
 	return count;
 } 
 
+extern int slurm_sort_char_list_asc(char *name_a, char *name_b)
+{
+	int diff = strcmp(name_a, name_b);
+
+	if (diff < 0)
+		return -1;
+	else if (diff > 0)
+		return 1;
+	
+	return 0;
+}
+
+extern int slurm_sort_char_list_desc(char *name_a, char *name_b)
+{
+	int diff = strcmp(name_a, name_b);
+
+	if (diff > 0)
+		return -1;
+	else if (diff < 0)
+		return 1;
+	
+	return 0;
+}
+
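/*
 * Illustrative sketch (not part of this patch): the char-list helpers used
 * together.  slurm_addto_char_list() splits a comma separated string into
 * unique, lower-cased entries (note that it modifies the string in place,
 * so a writable buffer is required), and the new comparators plug straight
 * into list_sort(), as get_qos_complete_str() does with
 * slurm_sort_char_list_asc().  The names in the buffer are hypothetical.
 */
static void _char_list_example(void)
{
	char names_buf[] = "chemistry,physics,astro";
	List names = list_create(slurm_destroy_char);

	slurm_addto_char_list(names, names_buf);
	list_sort(names, (ListCmpF)slurm_sort_char_list_asc);

	list_destroy(names);
}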
 void slurm_free_last_update_msg(last_update_msg_t * msg)
 {
 	xfree(msg);
@@ -754,6 +776,9 @@ private_data_string(uint16_t private_data, char *str, int str_len)
 		strcat(str, "accounts"); //9 len
 	}
 	// total len 42
+
+	if (str[0] == '\0')
+		strcat(str, "none");
 }
 
 char *job_state_string(enum job_states inx)
@@ -1051,6 +1076,7 @@ void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr)
 		xfree(config_ptr->propagate_rlimits);
 		xfree(config_ptr->propagate_rlimits_except);
 		xfree(config_ptr->resume_program);
+		xfree(config_ptr->salloc_default_command);
 		xfree(config_ptr->sched_params);
 		xfree(config_ptr->schedtype);
 		xfree(config_ptr->select_type);
@@ -1441,6 +1467,7 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case RESPONSE_FORWARD_FAILED:
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
+	case ACCOUNTING_FIRST_REG:
 		/* No body to free */
 		break;
 	case ACCOUNTING_UPDATE_MSG:
diff --git a/src/common/slurm_protocol_defs.h b/src/common/slurm_protocol_defs.h
index 587f5d2cf..cfb2f9c5a 100644
--- a/src/common/slurm_protocol_defs.h
+++ b/src/common/slurm_protocol_defs.h
@@ -206,6 +206,7 @@ typedef enum {
 	RESPONSE_FORWARD_FAILED = 9001,
 
 	ACCOUNTING_UPDATE_MSG = 10001,
+	ACCOUNTING_FIRST_REG,
 
 } slurm_msg_type_t;
 
@@ -228,7 +229,8 @@ typedef struct forward {
 typedef struct slurm_protocol_header {
 	uint16_t version;
 	uint16_t flags;
-	slurm_msg_type_t msg_type;
+	uint16_t msg_type; /* really slurm_msg_type_t but needs to be
+			      uint16_t for packing purposes. */
 	uint32_t body_length;
 	uint16_t ret_cnt;
 	forward_t forward;
@@ -263,7 +265,8 @@ typedef struct slurm_protocol_config {
 } slurm_protocol_config_t;
 
 typedef struct slurm_msg {
-	slurm_msg_type_t msg_type;
+	uint16_t msg_type; /* really a slurm_msg_type_t but needs to be
+			    * this way for packing purposes.  message type */
 	uint16_t flags;
 	slurm_addr address;       
 	slurm_fd conn_fd;
@@ -279,7 +282,8 @@ typedef struct slurm_msg {
 } slurm_msg_t;
 
 typedef struct ret_data_info {
-	slurm_msg_type_t type; /* message type */
+	uint16_t type; /* really a slurm_msg_type_t but needs to be
+			* this way for packing purposes.  message type */
 	uint32_t err;
 	char *node_name;
 	void *data; /* used to hold the return message data (i.e. 
@@ -725,6 +729,7 @@ typedef struct slurm_node_registration_status_msg {
 
 typedef struct {
 	List update_list; /* of type acct_update_object_t *'s */
+	uint16_t rpc_version;
 } accounting_update_msg_t;
 
 typedef struct slurm_ctl_conf slurm_ctl_conf_info_msg_t;
@@ -750,6 +755,8 @@ extern void slurm_msg_t_copy(slurm_msg_t *dest, slurm_msg_t *src);
 
 extern void slurm_destroy_char(void *object);
 extern int slurm_addto_char_list(List char_list, char *names);
+extern int slurm_sort_char_list_asc(char *name_a, char *name_b);
+extern int slurm_sort_char_list_desc(char *name_a, char *name_b);
 
 /* free message functions */
 void inline slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg);
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index c7ef3665c..f5a335677 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -62,6 +62,7 @@
 #include "src/common/xassert.h"
 #include "src/common/forward.h"
 #include "src/common/job_options.h"
+#include "src/common/slurmdbd_defs.h"
 
 #define _pack_job_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
 #define _pack_job_step_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
@@ -344,7 +345,8 @@ static void _pack_will_run_response_msg(will_run_response_msg_t *msg, Buf buffer
 static int  _unpack_will_run_response_msg(will_run_response_msg_t ** msg_ptr, 
 					  Buf buffer);
 
-static void _pack_accounting_update_msg(accounting_update_msg_t *msg, Buf buffer);
+static void _pack_accounting_update_msg(accounting_update_msg_t *msg, 
+					Buf buffer);
 static int _unpack_accounting_update_msg(accounting_update_msg_t **msg,
 					 Buf buffer);
 
@@ -360,7 +362,7 @@ pack_header(header_t * header, Buf buffer)
 	
 	pack16((uint16_t)header->version, buffer);
 	pack16((uint16_t)header->flags, buffer);
-	pack16((uint16_t) header->msg_type, buffer);
+	pack16((uint16_t)header->msg_type, buffer);
 	pack32((uint32_t)header->body_length, buffer);
 	pack16((uint16_t)header->forward.cnt, buffer);
 	if (header->forward.cnt > 0) {
@@ -385,7 +387,6 @@ pack_header(header_t * header, Buf buffer)
 int
 unpack_header(header_t * header, Buf buffer)
 {
-	uint16_t uint16_tmp;
 	uint32_t uint32_tmp = 0;
 
 	memset(header, 0, sizeof(header_t));
@@ -393,8 +394,7 @@ unpack_header(header_t * header, Buf buffer)
 	header->ret_list = NULL;
 	safe_unpack16(&header->version, buffer);
 	safe_unpack16(&header->flags, buffer);
-	safe_unpack16(&uint16_tmp, buffer);
-	header->msg_type = (slurm_msg_type_t) uint16_tmp;
+	safe_unpack16(&header->msg_type, buffer);
 	safe_unpack32(&header->body_length, buffer);
 	safe_unpack16(&header->forward.cnt, buffer);
 	if (header->forward.cnt > 0) {		
@@ -486,6 +486,7 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 	case REQUEST_CONTROL:
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
+	case ACCOUNTING_FIRST_REG:
 		/* Message contains no body/information */
 		break;
 	case REQUEST_SHUTDOWN:
@@ -816,6 +817,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 	case REQUEST_CONTROL:
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
+	case ACCOUNTING_FIRST_REG:
 		/* Message contains no body/information */
 		break;
 	case REQUEST_SHUTDOWN:
@@ -1092,7 +1094,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 			(set_debug_level_msg_t **)&(msg->data), buffer);
 		break;
 	case ACCOUNTING_UPDATE_MSG:
-		_unpack_accounting_update_msg(
+		rc = _unpack_accounting_update_msg(
 			(accounting_update_msg_t **)&msg->data,
 			buffer);
 		break;
@@ -3760,7 +3762,6 @@ _unpack_ret_list(List *ret_list,
 		 uint16_t size_val, Buf buffer)
 {
 	int i = 0;
-	uint16_t uint16_tmp;
 	uint32_t uint32_tmp;
 	ret_data_info_t *ret_data_info = NULL;
 	slurm_msg_t msg;
@@ -3771,8 +3772,7 @@ _unpack_ret_list(List *ret_list,
 		list_push(*ret_list, ret_data_info);
 		
 		safe_unpack32((uint32_t *)&ret_data_info->err, buffer);
-		safe_unpack16(&uint16_tmp, buffer);
-		ret_data_info->type = (slurm_msg_type_t)uint16_tmp;
+		safe_unpack16(&ret_data_info->type, buffer);
 		safe_unpackstr_xmalloc(&ret_data_info->node_name, 
 				       &uint32_tmp, buffer);
 		msg.msg_type = ret_data_info->type;
@@ -4764,7 +4764,7 @@ static void _pack_accounting_update_msg(accounting_update_msg_t *msg,
 	if(count) {
 		itr = list_iterator_create(msg->update_list);
 		while((rec = list_next(itr))) {
-			pack_acct_update_object(rec, buffer);
+			pack_acct_update_object(rec, msg->rpc_version, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
@@ -4784,7 +4784,12 @@ static int _unpack_accounting_update_msg(accounting_update_msg_t **msg,
 	safe_unpack32(&count, buffer);
 	msg_ptr->update_list = list_create(destroy_acct_update_object);
 	for(i=0; i<count; i++) {
-		if((unpack_acct_update_object(&rec, buffer)) == SLURM_ERROR)
+		/* this is only run in the slurmctld, so we can just
+		   use the version here.
+		*/
+		if((unpack_acct_update_object(&rec, SLURMDBD_VERSION,
+					      buffer))
+		   == SLURM_ERROR)
 			goto unpack_error;
 		list_append(msg_ptr->update_list, rec);
 	}
diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c
index c527f6f20..804a894d7 100644
--- a/src/common/slurmdbd_defs.c
+++ b/src/common/slurmdbd_defs.c
@@ -63,6 +63,7 @@
 #include "src/common/fd.h"
 #include "src/common/pack.h"
 #include "src/common/slurmdbd_defs.h"
+#include "src/common/assoc_mgr.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_accounting_storage.h"
@@ -74,8 +75,11 @@
 #define DBD_MAGIC		0xDEAD3219
 #define MAX_AGENT_QUEUE		10000
 #define MAX_DBD_MSG_LEN		16384
-#define SLURMDBD_TIMEOUT	60	/* Seconds SlurmDBD for response */
+#define SLURMDBD_TIMEOUT	300	/* Seconds SlurmDBD for response */
 
+bool running_cache = 0;
+
+static pthread_mutex_t replace_cache = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t agent_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t  agent_cond = PTHREAD_COND_INITIALIZER;
 static List      agent_list     = (List) NULL;
@@ -93,7 +97,7 @@ static void   _close_slurmdbd_fd(void);
 static void   _create_agent(void);
 static bool   _fd_readable(slurm_fd fd, int read_timeout);
 static int    _fd_writeable(slurm_fd fd);
-static int    _get_return_code(int read_timeout);
+static int    _get_return_code(uint16_t rpc_version, int read_timeout);
 static Buf    _load_dbd_rec(int fd);
 static void   _load_dbd_state(void);
 static void   _open_slurmdbd_fd(void);
@@ -107,8 +111,8 @@ static int    _send_fini_msg(void);
 static int    _send_msg(Buf buffer);
 static void   _sig_handler(int signal);
 static void   _shutdown_agent(void);
-static void   _slurmdbd_packstr(void *str, Buf buffer);
-static int    _slurmdbd_unpackstr(void **str, Buf buffer);
+static void   _slurmdbd_packstr(void *str, uint16_t rpc_version, Buf buffer);
+static int    _slurmdbd_unpackstr(void **str, uint16_t rpc_version, Buf buffer);
 static int    _tot_wait (struct timeval *start_time);
 
 /****************************************************************************
@@ -123,11 +127,9 @@ static int    _tot_wait (struct timeval *start_time);
 extern int slurm_open_slurmdbd_conn(char *auth_info, bool make_agent, 
 				    bool rollback)
 {
-	slurm_mutex_lock(&agent_lock);
-	if (make_agent && ((agent_tid == 0) || (agent_list == NULL)))
-		_create_agent();
-	slurm_mutex_unlock(&agent_lock);
-
+	/* we need to set this up before we create the agent, or we
+	   will hit a threading issue.
+	*/
 	slurm_mutex_lock(&slurmdbd_lock);
 	xfree(slurmdbd_auth_info);
 	if (auth_info)
@@ -139,7 +141,15 @@ extern int slurm_open_slurmdbd_conn(char *auth_info, bool make_agent,
 		_open_slurmdbd_fd();
 	slurm_mutex_unlock(&slurmdbd_lock);
 
-	return SLURM_SUCCESS;
+	slurm_mutex_lock(&agent_lock);
+	if (make_agent && ((agent_tid == 0) || (agent_list == NULL)))
+		_create_agent();
+	slurm_mutex_unlock(&agent_lock);
+
+	if (slurmdbd_fd < 0)
+		return SLURM_ERROR;
+	else
+		return SLURM_SUCCESS;
 }
 
 /* Close the SlurmDBD socket connection */
@@ -166,7 +176,8 @@ extern int slurm_close_slurmdbd_conn(void)
 /* Send an RPC to the SlurmDBD and wait for the return code reply.
  * The RPC will not be queued if an error occurs.
  * Returns SLURM_SUCCESS or an error code */
-extern int slurm_send_slurmdbd_recv_rc_msg(slurmdbd_msg_t *req, int *resp_code)
+extern int slurm_send_slurmdbd_recv_rc_msg(uint16_t rpc_version, 
+					   slurmdbd_msg_t *req, int *resp_code)
 {
 	int rc;
 	slurmdbd_msg_t *resp;
@@ -175,7 +186,7 @@ extern int slurm_send_slurmdbd_recv_rc_msg(slurmdbd_msg_t *req, int *resp_code)
 	xassert(resp_code);
 
 	resp = xmalloc(sizeof(slurmdbd_msg_t));
-	rc = slurm_send_recv_slurmdbd_msg(req, resp);
+	rc = slurm_send_recv_slurmdbd_msg(rpc_version, req, resp);
 	if (rc != SLURM_SUCCESS) {
 		;	/* error message already sent */
 	} else if (resp->msg_type != DBD_RC) {
@@ -187,7 +198,7 @@ extern int slurm_send_slurmdbd_recv_rc_msg(slurmdbd_msg_t *req, int *resp_code)
 		if(msg->return_code != SLURM_SUCCESS)
 			error("slurmdbd(%d): from %u: %s", msg->return_code, 
 			      msg->sent_type, msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(rpc_version, msg);
 	}
 	xfree(resp);
 
@@ -198,7 +209,8 @@ extern int slurm_send_slurmdbd_recv_rc_msg(slurmdbd_msg_t *req, int *resp_code)
  * The RPC will not be queued if an error occurs.
  * The "resp" message must be freed by the caller.
  * Returns SLURM_SUCCESS or an error code */
-extern int slurm_send_recv_slurmdbd_msg(slurmdbd_msg_t *req, 
+extern int slurm_send_recv_slurmdbd_msg(uint16_t rpc_version, 
+					slurmdbd_msg_t *req, 
 					slurmdbd_msg_t *resp)
 {
 	int rc = SLURM_SUCCESS, read_timeout;
@@ -219,7 +231,7 @@ extern int slurm_send_recv_slurmdbd_msg(slurmdbd_msg_t *req,
 		}
 	}
 
-	buffer = pack_slurmdbd_msg(req);
+	buffer = pack_slurmdbd_msg(rpc_version, req);
 
 	rc = _send_msg(buffer);
 	free_buf(buffer);
@@ -237,10 +249,11 @@ extern int slurm_send_recv_slurmdbd_msg(slurmdbd_msg_t *req,
 		return SLURM_ERROR;
 	}
 		
-	rc = unpack_slurmdbd_msg(resp, buffer);
+	rc = unpack_slurmdbd_msg(rpc_version, resp, buffer);
 
 	free_buf(buffer);
 	slurm_mutex_unlock(&slurmdbd_lock);
+	
 	return rc;
 }
 
@@ -249,14 +262,14 @@ extern int slurm_send_recv_slurmdbd_msg(slurmdbd_msg_t *req,
  * NOTE: slurm_open_slurmdbd_conn() must have been called with make_agent set
  * 
  * Returns SLURM_SUCCESS or an error code */
-extern int slurm_send_slurmdbd_msg(slurmdbd_msg_t *req)
+extern int slurm_send_slurmdbd_msg(uint16_t rpc_version, slurmdbd_msg_t *req)
 {
 	Buf buffer;
 	int cnt, rc = SLURM_SUCCESS;
 	static time_t syslog_time = 0;
 
 	
-	buffer = pack_slurmdbd_msg(req);
+	buffer = pack_slurmdbd_msg(rpc_version, req);
 
 	slurm_mutex_lock(&agent_lock);
 	if ((agent_tid == 0) || (agent_list == NULL)) {
@@ -315,6 +328,7 @@ static void _open_slurmdbd_fd(void)
 		      slurmdbd_host, slurmdbd_port);
 	else {
 		slurmdbd_fd = slurm_open_msg_conn(&dbd_addr);
+
 		if (slurmdbd_fd < 0)
 			error("slurmdbd: slurm_open_msg_conn: %m");
 		else {
@@ -328,7 +342,7 @@ static void _open_slurmdbd_fd(void)
 	xfree(slurmdbd_host);
 }
 
-extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req)
+extern Buf pack_slurmdbd_msg(uint16_t rpc_version, slurmdbd_msg_t *req)
 {
 	Buf buffer = init_buf(MAX_DBD_MSG_LEN);
 	pack16(req->msg_type, buffer);
@@ -349,16 +363,19 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req)
 	case DBD_GOT_USERS:
 	case DBD_UPDATE_SHARES_USED:
 		slurmdbd_pack_list_msg(
-			req->msg_type, (dbd_list_msg_t *)req->data, buffer);
+			rpc_version, req->msg_type, 
+			(dbd_list_msg_t *)req->data, buffer);
 		break;
 	case DBD_ADD_ACCOUNT_COORDS:
 	case DBD_REMOVE_ACCOUNT_COORDS:
-		slurmdbd_pack_acct_coord_msg((dbd_acct_coord_msg_t *)req->data,
+		slurmdbd_pack_acct_coord_msg(rpc_version,
+					     (dbd_acct_coord_msg_t *)req->data,
 					     buffer);
 		break;
 	case DBD_CLUSTER_PROCS:
 	case DBD_FLUSH_JOBS:
 		slurmdbd_pack_cluster_procs_msg(
+			rpc_version, 
 			(dbd_cluster_procs_msg_t *)req->data, buffer);
 		break;
 	case DBD_GET_ACCOUNTS:
@@ -374,41 +391,49 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req)
 	case DBD_REMOVE_QOS:
 	case DBD_REMOVE_USERS:
 		slurmdbd_pack_cond_msg(
-			req->msg_type, (dbd_cond_msg_t *)req->data, buffer);
+			rpc_version, req->msg_type,
+			(dbd_cond_msg_t *)req->data, buffer);
 		break;
 	case DBD_GET_ASSOC_USAGE:
 	case DBD_GOT_ASSOC_USAGE:
 	case DBD_GET_CLUSTER_USAGE:
 	case DBD_GOT_CLUSTER_USAGE:
 		slurmdbd_pack_usage_msg(
-			req->msg_type, (dbd_usage_msg_t *)req->data,
-			buffer);
+			rpc_version, req->msg_type,
+			(dbd_usage_msg_t *)req->data, buffer);
 		break;
 	case DBD_GET_JOBS:
 		slurmdbd_pack_get_jobs_msg(
+			rpc_version, 
 			(dbd_get_jobs_msg_t *)req->data, buffer);
 		break;
 	case DBD_INIT:
-		slurmdbd_pack_init_msg((dbd_init_msg_t *)req->data, buffer, 
+		slurmdbd_pack_init_msg(rpc_version,
+				       (dbd_init_msg_t *)req->data, buffer, 
 				       slurmdbd_auth_info);
 		break;
 	case DBD_FINI:
-		slurmdbd_pack_fini_msg((dbd_fini_msg_t *)req->data, buffer);
+		slurmdbd_pack_fini_msg(rpc_version,
+				       (dbd_fini_msg_t *)req->data, buffer);
 		break;		
 	case DBD_JOB_COMPLETE:
-		slurmdbd_pack_job_complete_msg((dbd_job_comp_msg_t *)req->data,
+		slurmdbd_pack_job_complete_msg(rpc_version,
+					       (dbd_job_comp_msg_t *)req->data,
 					       buffer);
 		break;
 	case DBD_JOB_START:
-		slurmdbd_pack_job_start_msg((dbd_job_start_msg_t *)req->data, 
+		slurmdbd_pack_job_start_msg(rpc_version,
+					    (dbd_job_start_msg_t *)req->data, 
 					    buffer);
 		break;
 	case DBD_JOB_START_RC:
 		slurmdbd_pack_job_start_rc_msg(
+			rpc_version,
 			(dbd_job_start_rc_msg_t *)req->data, buffer);
 		break;		
 	case DBD_JOB_SUSPEND:
 		slurmdbd_pack_job_suspend_msg(
+			rpc_version,
 			(dbd_job_suspend_msg_t *)req->data, buffer);
 		break;
 	case DBD_MODIFY_ACCOUNTS:
@@ -416,29 +441,36 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req)
 	case DBD_MODIFY_CLUSTERS:
 	case DBD_MODIFY_USERS:
 		slurmdbd_pack_modify_msg(
-			req->msg_type, (dbd_modify_msg_t *)req->data, buffer);
+			rpc_version, req->msg_type,
+			(dbd_modify_msg_t *)req->data, buffer);
 		break;
 	case DBD_NODE_STATE:
 		slurmdbd_pack_node_state_msg(
+			rpc_version,
 			(dbd_node_state_msg_t *)req->data, buffer);
 		break;
 	case DBD_RC:
-		slurmdbd_pack_rc_msg((dbd_rc_msg_t *)req->data, buffer);
+		slurmdbd_pack_rc_msg(rpc_version,
+				     (dbd_rc_msg_t *)req->data, buffer);
 		break;
 	case DBD_STEP_COMPLETE:
 		slurmdbd_pack_step_complete_msg(
+			rpc_version,
 			(dbd_step_comp_msg_t *)req->data, buffer);
 		break;
 	case DBD_STEP_START:
-		slurmdbd_pack_step_start_msg((dbd_step_start_msg_t *)req->data,
+		slurmdbd_pack_step_start_msg(rpc_version,
+					     (dbd_step_start_msg_t *)req->data,
 					     buffer);
 		break;
 	case DBD_REGISTER_CTLD:
-		slurmdbd_pack_register_ctld_msg((dbd_register_ctld_msg_t *)
-						req->data, buffer);
+		slurmdbd_pack_register_ctld_msg(
+			rpc_version,
+			(dbd_register_ctld_msg_t *)req->data, buffer);
 		break;
 	case DBD_ROLL_USAGE:
-		slurmdbd_pack_roll_usage_msg((dbd_roll_usage_msg_t *)
+		slurmdbd_pack_roll_usage_msg(rpc_version,
+					     (dbd_roll_usage_msg_t *)
 					     req->data, buffer);
 		break;
 	default:
@@ -452,7 +484,8 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req)
 	return buffer;
 }
 
-extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp, Buf buffer)
+extern int unpack_slurmdbd_msg(uint16_t rpc_version, 
+			       slurmdbd_msg_t *resp, Buf buffer)
 {
 	int rc = SLURM_SUCCESS;
        
@@ -474,16 +507,19 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp, Buf buffer)
 	case DBD_GOT_USERS:
 	case DBD_UPDATE_SHARES_USED:
 		rc = slurmdbd_unpack_list_msg(
-			resp->msg_type, (dbd_list_msg_t **)&resp->data, buffer);
+			rpc_version, resp->msg_type,
+			(dbd_list_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_ADD_ACCOUNT_COORDS:
 	case DBD_REMOVE_ACCOUNT_COORDS:
 		rc = slurmdbd_unpack_acct_coord_msg(
+			rpc_version,
 			(dbd_acct_coord_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_CLUSTER_PROCS:
 	case DBD_FLUSH_JOBS:
 		rc = slurmdbd_unpack_cluster_procs_msg(
+			rpc_version,
 			(dbd_cluster_procs_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_GET_ACCOUNTS:
@@ -499,43 +535,52 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp, Buf buffer)
 	case DBD_REMOVE_QOS:
 	case DBD_REMOVE_USERS:
 		rc = slurmdbd_unpack_cond_msg(
-			resp->msg_type, (dbd_cond_msg_t **)&resp->data, buffer);
+			rpc_version, resp->msg_type,
+			(dbd_cond_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_GET_ASSOC_USAGE:
 	case DBD_GOT_ASSOC_USAGE:
 	case DBD_GET_CLUSTER_USAGE:
 	case DBD_GOT_CLUSTER_USAGE:
 		rc = slurmdbd_unpack_usage_msg(
+			rpc_version,
 			resp->msg_type, (dbd_usage_msg_t **)&resp->data, 
 			buffer);
 		break;
 	case DBD_GET_JOBS:
 		rc = slurmdbd_unpack_get_jobs_msg(
+			rpc_version,
 			(dbd_get_jobs_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_INIT:
-		rc = slurmdbd_unpack_init_msg((dbd_init_msg_t **)&resp->data,
+		rc = slurmdbd_unpack_init_msg(rpc_version,
+					      (dbd_init_msg_t **)&resp->data,
 					      buffer, 
 					      slurmdbd_auth_info);
 		break;
 	case DBD_FINI:
-		rc = slurmdbd_unpack_fini_msg((dbd_fini_msg_t **)&resp->data,
+		rc = slurmdbd_unpack_fini_msg(rpc_version,
+					      (dbd_fini_msg_t **)&resp->data,
 					      buffer);
 		break;		
 	case DBD_JOB_COMPLETE:
 		rc = slurmdbd_unpack_job_complete_msg(
+			rpc_version,
 			(dbd_job_comp_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_JOB_START:
 		rc = slurmdbd_unpack_job_start_msg(
+			rpc_version,
 			(dbd_job_start_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_JOB_START_RC:
 		rc = slurmdbd_unpack_job_start_rc_msg(
+			rpc_version,
 			(dbd_job_start_rc_msg_t **)&resp->data, buffer);
 		break;		
 	case DBD_JOB_SUSPEND:
 		rc = slurmdbd_unpack_job_suspend_msg(
+			rpc_version,
 			(dbd_job_suspend_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_MODIFY_ACCOUNTS:
@@ -543,31 +588,38 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp, Buf buffer)
 	case DBD_MODIFY_CLUSTERS:
 	case DBD_MODIFY_USERS:
 		rc = slurmdbd_unpack_modify_msg(
+			rpc_version,
 			resp->msg_type, (dbd_modify_msg_t **)&resp->data,
 			buffer);
 		break;
 	case DBD_NODE_STATE:
 		rc = slurmdbd_unpack_node_state_msg(
+			rpc_version,
 			(dbd_node_state_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_RC:
-		rc = slurmdbd_unpack_rc_msg((dbd_rc_msg_t **)&resp->data,
+		rc = slurmdbd_unpack_rc_msg(rpc_version,
+					    (dbd_rc_msg_t **)&resp->data,
 					    buffer);
 		break;
 	case DBD_STEP_COMPLETE:
 		rc = slurmdbd_unpack_step_complete_msg(
+			rpc_version,
 			(dbd_step_comp_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_STEP_START:
 		rc = slurmdbd_unpack_step_start_msg(
+			rpc_version,
 			(dbd_step_start_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_REGISTER_CTLD:
 		rc = slurmdbd_unpack_register_ctld_msg(
+			rpc_version,
 			(dbd_register_ctld_msg_t **)&resp->data, buffer);
 		break;
 	case DBD_ROLL_USAGE:
 		rc = slurmdbd_unpack_roll_usage_msg(
+			rpc_version,
 			(dbd_roll_usage_msg_t **)&resp->data, buffer);
 		break;
 	default:
@@ -1013,7 +1065,7 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type, int get_enum)
 	return "Unknown";
 }
 
-static int _send_init_msg(void)
+static int _send_init_msg()
 {
 	int rc, read_timeout;
 	Buf buffer;
@@ -1023,7 +1075,8 @@ static int _send_init_msg(void)
 	pack16((uint16_t) DBD_INIT, buffer);
 	req.rollback = rollback_started;
 	req.version  = SLURMDBD_VERSION;
-	slurmdbd_pack_init_msg(&req, buffer, slurmdbd_auth_info);
+	slurmdbd_pack_init_msg(SLURMDBD_VERSION, &req, buffer,
+			       slurmdbd_auth_info);
 
 	rc = _send_msg(buffer);
 	free_buf(buffer);
@@ -1033,7 +1086,8 @@ static int _send_init_msg(void)
 	}
 
 	read_timeout = slurm_get_msg_timeout() * 1000;
-	rc = _get_return_code(read_timeout);
+	rc = _get_return_code(SLURMDBD_VERSION, read_timeout);
+	
 	return rc;
 }
 
@@ -1046,7 +1100,7 @@ static int _send_fini_msg(void)
 	pack16((uint16_t) DBD_FINI, buffer);
 	req.commit  = 0;
 	req.close_conn   = 1;
-	slurmdbd_pack_fini_msg(&req, buffer);
+	slurmdbd_pack_fini_msg(SLURMDBD_VERSION, &req, buffer);
 
 	_send_msg(buffer);
 	free_buf(buffer);
@@ -1115,7 +1169,7 @@ static int _send_msg(Buf buffer)
 	return SLURM_SUCCESS;
 }
 
-static int _get_return_code(int read_timeout)
+static int _get_return_code(uint16_t rpc_version, int read_timeout)
 {
 	Buf buffer;
 	uint16_t msg_type;
@@ -1130,33 +1184,45 @@ static int _get_return_code(int read_timeout)
 	safe_unpack16(&msg_type, buffer);
 	switch(msg_type) {
 	case DBD_JOB_START_RC:
-		if (slurmdbd_unpack_job_start_rc_msg(&js_msg, buffer)
+		if (slurmdbd_unpack_job_start_rc_msg(rpc_version, 
+						     &js_msg, buffer)
 		    == SLURM_SUCCESS) {
 			rc = js_msg->return_code;
-			slurmdbd_free_job_start_rc_msg(js_msg);
+			slurmdbd_free_job_start_rc_msg(rpc_version, js_msg);
 			if (rc != SLURM_SUCCESS)
 				error("slurmdbd: DBD_JOB_START_RC is %d", rc);
 		} else
 			error("slurmdbd: unpack message error");
 		break;
 	case DBD_RC:
-		if (slurmdbd_unpack_rc_msg(&msg, buffer) == SLURM_SUCCESS) {
+		if (slurmdbd_unpack_rc_msg(rpc_version, 
+					   &msg, buffer) == SLURM_SUCCESS) {
 			rc = msg->return_code;
 			if (rc != SLURM_SUCCESS) {
-				error("slurmdbd: DBD_RC is %d from %s(%u): %s",
-				      rc,
-				      slurmdbd_msg_type_2_str(msg->sent_type,
-							       1),
-				      msg->sent_type,
-				      msg->comment);
 				if(msg->sent_type == DBD_REGISTER_CTLD &&
-				   slurm_get_accounting_storage_enforce())
+				   slurm_get_accounting_storage_enforce()) {
+					error("slurmdbd: DBD_RC is %d from "
+					      "%s(%u): %s",
+					      rc,
+					      slurmdbd_msg_type_2_str(
+						      msg->sent_type, 1),
+					      msg->sent_type,
+					      msg->comment);
 					fatal("You need to add this cluster "
 					      "to accounting if you want to "
 					      "enforce associations, or no "
 					      "jobs will ever run.");
+				} else
+					error("slurmdbd: DBD_RC is %d from "
+					      "%s(%u): %s",
+					      rc,
+					      slurmdbd_msg_type_2_str(
+						      msg->sent_type, 1),
+					      msg->sent_type,
+					      msg->comment);
+				
 			}
-			slurmdbd_free_rc_msg(msg);
+			slurmdbd_free_rc_msg(rpc_version, msg);
 		} else
 			error("slurmdbd: unpack message error");
 		break;
@@ -1288,6 +1354,7 @@ static int _fd_writeable(slurm_fd fd)
 	int write_timeout = 5000;
 	int rc, time_left;
 	struct timeval tstart;
+	char temp[2];
 
 	ufds.fd     = fd;
 	ufds.events = POLLOUT;
@@ -1303,7 +1370,14 @@ static int _fd_writeable(slurm_fd fd)
 		}
 		if (rc == 0)
 			return 0;
-		if (ufds.revents & POLLHUP) {
+		/*
+		 * Check here to make sure the socket really is there.
+		 * If not, exit out and notify the sender.  This
+		 * is here since a write doesn't always tell you the
+		 * socket is gone, but getting 0 back from a
+		 * nonblocking read means just that.
+		 */
+		if (ufds.revents & POLLHUP || (recv(fd, &temp, 1, 0) == 0)) {
 			debug2("SlurmDBD connection is closed");
 			return -1;
 		}
@@ -1380,12 +1454,12 @@ static void _shutdown_agent(void)
 	}
 }
 
-static void _slurmdbd_packstr(void *str, Buf buffer)
+static void _slurmdbd_packstr(void *str, uint16_t rpc_version, Buf buffer)
 {
 	packstr((char *)str, buffer);
 }
 
-static int _slurmdbd_unpackstr(void **str, Buf buffer)
+static int _slurmdbd_unpackstr(void **str, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	safe_unpackstr_xmalloc((char **)str, &uint32_tmp, buffer);
@@ -1412,7 +1486,7 @@ static void *_agent(void *x)
 
 		slurm_mutex_lock(&slurmdbd_lock);
 		if ((slurmdbd_fd < 0) && 
-		    (difftime(time(NULL), fail_time) >= 10)) {
+		    (difftime(time(NULL), fail_time) >= 10)) {			
 			/* The connection to Slurm DBD is not open */
 			_open_slurmdbd_fd();
 			if (slurmdbd_fd < 0)
@@ -1443,6 +1517,16 @@ static void *_agent(void *x)
 		slurm_mutex_unlock(&agent_lock);
 		if (buffer == NULL) {
 			slurm_mutex_unlock(&slurmdbd_lock);
+
+			slurm_mutex_lock(&replace_cache);
+			/* It is ok to send a NULL as the first value since
+			 * this will most likely only happen when talking with
+			 * the DBD 
+			 */
+			if(slurmdbd_fd >= 0 && running_cache)
+				assoc_mgr_refresh_lists(NULL, NULL);		
+			slurm_mutex_unlock(&replace_cache);
+			
 			continue;
 		}
 
@@ -1455,7 +1539,7 @@ static void *_agent(void *x)
 				break;
 			error("slurmdbd: Failure sending message");
 		} else {
-			rc = _get_return_code(read_timeout);
+			rc = _get_return_code(SLURMDBD_VERSION, read_timeout);
 			if (rc == EAGAIN) {
 				if (agent_shutdown)
 					break;
@@ -1464,6 +1548,15 @@ static void *_agent(void *x)
 			}
 		}
 		slurm_mutex_unlock(&slurmdbd_lock);
+		
+		slurm_mutex_lock(&replace_cache);
+		/* It is ok to send a NULL as the first value since
+		 * this will most likely only happen when talking with
+		 * the DBD 
+		 */
+		if(slurmdbd_fd >= 0 && running_cache)
+			assoc_mgr_refresh_lists(NULL, NULL);		
+		slurm_mutex_unlock(&replace_cache);
 
 		slurm_mutex_lock(&agent_lock);
 		if (agent_list && (rc == SLURM_SUCCESS)) {
@@ -1667,7 +1760,8 @@ static int _purge_job_start_req(void)
 /****************************************************************************\
  * Free data structures
 \****************************************************************************/
-void inline slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg)
+void inline slurmdbd_free_acct_coord_msg(uint16_t rpc_version, 
+					 dbd_acct_coord_msg_t *msg)
 {
 	if(msg) {
 		if(msg->acct_list) {
@@ -1678,7 +1772,8 @@ void inline slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg)
 		xfree(msg);
 	}
 }
-void inline slurmdbd_free_cluster_procs_msg(dbd_cluster_procs_msg_t *msg)
+void inline slurmdbd_free_cluster_procs_msg(uint16_t rpc_version, 
+					    dbd_cluster_procs_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->cluster_name);
@@ -1686,7 +1781,8 @@ void inline slurmdbd_free_cluster_procs_msg(dbd_cluster_procs_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_cond_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_free_cond_msg(uint16_t rpc_version, 
+				   slurmdbd_msg_type_t type,
 				   dbd_cond_msg_t *msg)
 {
 	void (*my_destroy) (void *object);
@@ -1729,7 +1825,8 @@ void inline slurmdbd_free_cond_msg(slurmdbd_msg_type_t type,
 	}
 }
 
-void inline slurmdbd_free_get_jobs_msg(dbd_get_jobs_msg_t *msg)
+void inline slurmdbd_free_get_jobs_msg(uint16_t rpc_version, 
+				       dbd_get_jobs_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->cluster_name);
@@ -1742,17 +1839,20 @@ void inline slurmdbd_free_get_jobs_msg(dbd_get_jobs_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_init_msg(dbd_init_msg_t *msg)
+void inline slurmdbd_free_init_msg(uint16_t rpc_version, 
+				   dbd_init_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void inline slurmdbd_free_fini_msg(dbd_fini_msg_t *msg)
+void inline slurmdbd_free_fini_msg(uint16_t rpc_version, 
+				   dbd_fini_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void inline slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg)
+void inline slurmdbd_free_job_complete_msg(uint16_t rpc_version, 
+					   dbd_job_comp_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->nodes);
@@ -1760,11 +1860,13 @@ void inline slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_job_start_msg(dbd_job_start_msg_t *msg)
+void inline slurmdbd_free_job_start_msg(uint16_t rpc_version, 
+					dbd_job_start_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->account);
 		xfree(msg->block_id);
+		xfree(msg->cluster);
 		xfree(msg->name);
 		xfree(msg->nodes);
 		xfree(msg->partition);
@@ -1772,17 +1874,20 @@ void inline slurmdbd_free_job_start_msg(dbd_job_start_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_job_start_rc_msg(dbd_job_start_rc_msg_t *msg)
+void inline slurmdbd_free_job_start_rc_msg(uint16_t rpc_version, 
+					   dbd_job_start_rc_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void inline slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg)
+void inline slurmdbd_free_job_suspend_msg(uint16_t rpc_version, 
+					  dbd_job_suspend_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void inline slurmdbd_free_list_msg(dbd_list_msg_t *msg)
+void inline slurmdbd_free_list_msg(uint16_t rpc_version, 
+				   dbd_list_msg_t *msg)
 {
 	if (msg) {
 		if(msg->my_list)
@@ -1791,7 +1896,8 @@ void inline slurmdbd_free_list_msg(dbd_list_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_modify_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_free_modify_msg(uint16_t rpc_version, 
+				     slurmdbd_msg_type_t type,
 				     dbd_modify_msg_t *msg)
 {
 	void (*destroy_cond) (void *object);
@@ -1828,7 +1934,8 @@ void inline slurmdbd_free_modify_msg(slurmdbd_msg_type_t type,
 	}
 }
 
-void inline slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg)
+void inline slurmdbd_free_node_state_msg(uint16_t rpc_version, 
+					 dbd_node_state_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->cluster_name);
@@ -1838,7 +1945,8 @@ void inline slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_rc_msg(dbd_rc_msg_t *msg)
+void inline slurmdbd_free_rc_msg(uint16_t rpc_version, 
+				 dbd_rc_msg_t *msg)
 {
 	if(msg) {
 		xfree(msg->comment);
@@ -1846,7 +1954,8 @@ void inline slurmdbd_free_rc_msg(dbd_rc_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg)
+void inline slurmdbd_free_register_ctld_msg(uint16_t rpc_version, 
+					    dbd_register_ctld_msg_t *msg)
 {
 	if(msg) {
 		xfree(msg->cluster_name);
@@ -1854,12 +1963,14 @@ void inline slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_roll_usage_msg(dbd_roll_usage_msg_t *msg)
+void inline slurmdbd_free_roll_usage_msg(uint16_t rpc_version, 
+					 dbd_roll_usage_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void inline slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg)
+void inline slurmdbd_free_step_complete_msg(uint16_t rpc_version, 
+					    dbd_step_comp_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->jobacct);
@@ -1867,7 +1978,8 @@ void inline slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg)
+void inline slurmdbd_free_step_start_msg(uint16_t rpc_version, 
+					 dbd_step_start_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->name);
@@ -1876,7 +1988,8 @@ void inline slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg)
 	}
 }
 
-void inline slurmdbd_free_usage_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_free_usage_msg(uint16_t rpc_version, 
+				    slurmdbd_msg_type_t type,
 				    dbd_usage_msg_t *msg)
 {
 	void (*destroy_rec) (void *object);
@@ -1905,7 +2018,8 @@ void inline slurmdbd_free_usage_msg(slurmdbd_msg_type_t type,
  * Pack and unpack data structures
 \****************************************************************************/
 void inline
-slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg, Buf buffer)
+slurmdbd_pack_acct_coord_msg(uint16_t rpc_version,
+			     dbd_acct_coord_msg_t *msg, Buf buffer)
 {
 	char *acct = NULL;
 	ListIterator itr = NULL;
@@ -1924,11 +2038,12 @@ slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg, Buf buffer)
 	}
 	count = 0;
 
-	pack_acct_user_cond(msg->cond, buffer);
+	pack_acct_user_cond(msg->cond, rpc_version, buffer);
 }
 
 int inline
-slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg, Buf buffer)
+slurmdbd_unpack_acct_coord_msg(uint16_t rpc_version, 
+			       dbd_acct_coord_msg_t **msg, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
@@ -1946,18 +2061,20 @@ slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg, Buf buffer)
 		}
 	}
 
-	if(unpack_acct_user_cond((void *)&msg_ptr->cond, buffer) == SLURM_ERROR)
+	if(unpack_acct_user_cond((void *)&msg_ptr->cond, rpc_version, buffer) 
+	   == SLURM_ERROR)
 		goto unpack_error;
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_acct_coord_msg(msg_ptr);
+	slurmdbd_free_acct_coord_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline
-slurmdbd_pack_cluster_procs_msg(dbd_cluster_procs_msg_t *msg, Buf buffer)
+slurmdbd_pack_cluster_procs_msg(uint16_t rpc_version, 
+				dbd_cluster_procs_msg_t *msg, Buf buffer)
 {
 	packstr(msg->cluster_name, buffer);
 	pack32(msg->proc_count,    buffer);
@@ -1965,7 +2082,8 @@ slurmdbd_pack_cluster_procs_msg(dbd_cluster_procs_msg_t *msg, Buf buffer)
 }
 
 int inline
-slurmdbd_unpack_cluster_procs_msg(dbd_cluster_procs_msg_t **msg, Buf buffer)
+slurmdbd_unpack_cluster_procs_msg(uint16_t rpc_version,
+				  dbd_cluster_procs_msg_t **msg, Buf buffer)
 {
 	dbd_cluster_procs_msg_t *msg_ptr;
 	uint32_t uint32_tmp;
@@ -1978,15 +2096,16 @@ slurmdbd_unpack_cluster_procs_msg(dbd_cluster_procs_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_cluster_procs_msg(msg_ptr);
+	slurmdbd_free_cluster_procs_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
-void inline slurmdbd_pack_cond_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_pack_cond_msg(uint16_t rpc_version, 
+				   slurmdbd_msg_type_t type,
 				   dbd_cond_msg_t *msg, Buf buffer)
 {
-	void (*my_function) (void *object, Buf buffer);
+	void (*my_function) (void *object, uint16_t rpc_version, Buf buffer);
 
 	switch(type) {
 	case DBD_GET_ACCOUNTS:
@@ -2020,14 +2139,15 @@ void inline slurmdbd_pack_cond_msg(slurmdbd_msg_type_t type,
 		return;
 	}
 
-	(*(my_function))(msg->cond, buffer);
+	(*(my_function))(msg->cond, rpc_version, buffer);
 }
 
-int inline slurmdbd_unpack_cond_msg(slurmdbd_msg_type_t type,
+int inline slurmdbd_unpack_cond_msg(uint16_t rpc_version, 
+				    slurmdbd_msg_type_t type,
 				    dbd_cond_msg_t **msg, Buf buffer)
 {
 	dbd_cond_msg_t *msg_ptr = NULL;
-	int (*my_function) (void **object, Buf buffer);
+	int (*my_function) (void **object, uint16_t rpc_version, Buf buffer);
 
 	switch(type) {
 	case DBD_GET_ACCOUNTS:
@@ -2064,18 +2184,19 @@ int inline slurmdbd_unpack_cond_msg(slurmdbd_msg_type_t type,
 	msg_ptr = xmalloc(sizeof(dbd_cond_msg_t));
 	*msg = msg_ptr;
 
-	if((*(my_function))(&msg_ptr->cond, buffer) == SLURM_ERROR)
+	if((*(my_function))(&msg_ptr->cond, rpc_version, buffer) == SLURM_ERROR)
 		goto unpack_error;
 	
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_cond_msg(type, msg_ptr);
+	slurmdbd_free_cond_msg(rpc_version, type, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
-void inline slurmdbd_pack_get_jobs_msg(dbd_get_jobs_msg_t *msg, Buf buffer)
+void inline slurmdbd_pack_get_jobs_msg(uint16_t rpc_version,
+				       dbd_get_jobs_msg_t *msg, Buf buffer)
 {
 	uint32_t i = 0;
 	ListIterator itr = NULL;
@@ -2097,7 +2218,7 @@ void inline slurmdbd_pack_get_jobs_msg(dbd_get_jobs_msg_t *msg, Buf buffer)
 	if(i) {
 		itr = list_iterator_create(msg->selected_steps);
 		while((job = list_next(itr))) {
-			pack_jobacct_selected_step(job, buffer);
+			pack_jobacct_selected_step(job, rpc_version, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
@@ -2117,7 +2238,8 @@ void inline slurmdbd_pack_get_jobs_msg(dbd_get_jobs_msg_t *msg, Buf buffer)
 	packstr(msg->user, buffer);
 }
 
-int inline slurmdbd_unpack_get_jobs_msg(dbd_get_jobs_msg_t **msg, Buf buffer)
+int inline slurmdbd_unpack_get_jobs_msg(uint16_t rpc_version,
+					dbd_get_jobs_msg_t **msg, Buf buffer)
 {
 	int i;
 	uint32_t count = 0;
@@ -2142,7 +2264,7 @@ int inline slurmdbd_unpack_get_jobs_msg(dbd_get_jobs_msg_t **msg, Buf buffer)
 		msg_ptr->selected_steps =
 			list_create(destroy_jobacct_selected_step);
 		for(i=0; i<count; i++) {
-			unpack_jobacct_selected_step(&job, buffer);
+			unpack_jobacct_selected_step(&job, rpc_version, buffer);
 			list_append(msg_ptr->selected_steps, job);
 		}
 	}
@@ -2160,13 +2282,14 @@ int inline slurmdbd_unpack_get_jobs_msg(dbd_get_jobs_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_get_jobs_msg(msg_ptr);
+	slurmdbd_free_get_jobs_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_init_msg(dbd_init_msg_t *msg, Buf buffer, char *auth_info)
+slurmdbd_pack_init_msg(uint16_t rpc_version, dbd_init_msg_t *msg, 
+		       Buf buffer, char *auth_info)
 {
 	int rc;
 	void *auth_cred;
@@ -2188,46 +2311,52 @@ slurmdbd_pack_init_msg(dbd_init_msg_t *msg, Buf buffer, char *auth_info)
 }
 
 int inline 
-slurmdbd_unpack_init_msg(dbd_init_msg_t **msg, Buf buffer, char *auth_info)
+slurmdbd_unpack_init_msg(uint16_t rpc_version, dbd_init_msg_t **msg,
+			 Buf buffer, char *auth_info)
 {
 	void *auth_cred;
 
 	dbd_init_msg_t *msg_ptr = xmalloc(sizeof(dbd_init_msg_t));
 	*msg = msg_ptr;
-
+	int rc = SLURM_SUCCESS;
+		
 	safe_unpack16(&msg_ptr->rollback, buffer);
 	safe_unpack16(&msg_ptr->version, buffer);
 	auth_cred = g_slurm_auth_unpack(buffer);
 	if (auth_cred == NULL) {
 		error("Unpacking authentication credential: %s",
 		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)));
+		rc = ESLURM_ACCESS_DENIED;
 		goto unpack_error;
 	}
 	msg_ptr->uid = g_slurm_auth_get_uid(auth_cred, auth_info);
 	if(g_slurm_auth_errno(auth_cred) != SLURM_SUCCESS) {
 		error("Bad authentication: %s",
 		      g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
+		rc = ESLURM_ACCESS_DENIED;
 		goto unpack_error;
 	}
 
 	g_slurm_auth_destroy(auth_cred);
-	return SLURM_SUCCESS;
+	return rc;
 
 unpack_error:
-	slurmdbd_free_init_msg(msg_ptr);
+	slurmdbd_free_init_msg(rpc_version, msg_ptr);
 	*msg = NULL;
-	return SLURM_ERROR;
+	if(rc == SLURM_SUCCESS)
+		rc = SLURM_ERROR;
+	return rc;
 }
 
 void inline 
-slurmdbd_pack_fini_msg(dbd_fini_msg_t *msg, Buf buffer)
+slurmdbd_pack_fini_msg(uint16_t rpc_version, dbd_fini_msg_t *msg, Buf buffer)
 {
 	pack16(msg->close_conn, buffer);
 	pack16(msg->commit, buffer);
 }
 
 int inline 
-slurmdbd_unpack_fini_msg(dbd_fini_msg_t **msg, Buf buffer)
+slurmdbd_unpack_fini_msg(uint16_t rpc_version, dbd_fini_msg_t **msg, Buf buffer)
 {
 	dbd_fini_msg_t *msg_ptr = xmalloc(sizeof(dbd_fini_msg_t));
 	*msg = msg_ptr;
@@ -2238,13 +2367,14 @@ slurmdbd_unpack_fini_msg(dbd_fini_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_fini_msg(msg_ptr);
+	slurmdbd_free_fini_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg, Buf buffer)
+slurmdbd_pack_job_complete_msg(uint16_t rpc_version, 
+			       dbd_job_comp_msg_t *msg, Buf buffer)
 {
 	pack32(msg->assoc_id, buffer);
 	pack32(msg->db_index, buffer);
@@ -2258,7 +2388,8 @@ slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg, Buf buffer)
 }
 
 int inline 
-slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg, Buf buffer)
+slurmdbd_unpack_job_complete_msg(uint16_t rpc_version,
+				 dbd_job_comp_msg_t **msg, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	dbd_job_comp_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_comp_msg_t));
@@ -2275,73 +2406,123 @@ slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_job_complete_msg(msg_ptr);
+	slurmdbd_free_job_complete_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_job_start_msg(dbd_job_start_msg_t *msg, Buf buffer)
-{
-	packstr(msg->account, buffer);
-	pack32(msg->alloc_cpus, buffer);
-	pack32(msg->assoc_id, buffer);
-	packstr(msg->block_id, buffer);
-	pack32(msg->db_index, buffer);
-	pack_time(msg->eligible_time, buffer);
-	pack32(msg->gid, buffer);
-	pack32(msg->job_id, buffer);
-	pack16(msg->job_state, buffer);
-	packstr(msg->name, buffer);
-	packstr(msg->nodes, buffer);
-	packstr(msg->partition, buffer);
-	pack32(msg->priority, buffer);
-	pack32(msg->req_cpus, buffer);
-	pack_time(msg->start_time, buffer);
-	pack_time(msg->submit_time, buffer);
-	pack32(msg->uid, buffer);
+slurmdbd_pack_job_start_msg(uint16_t rpc_version, 
+			    dbd_job_start_msg_t *msg, Buf buffer)
+{
+	if(rpc_version < 3) {
+		packstr(msg->account, buffer);
+		pack32(msg->alloc_cpus, buffer);
+		pack32(msg->assoc_id, buffer);
+		packstr(msg->block_id, buffer);
+		pack32(msg->db_index, buffer);
+		pack_time(msg->eligible_time, buffer);
+		pack32(msg->gid, buffer);
+		pack32(msg->job_id, buffer);
+		pack16(msg->job_state, buffer);
+		packstr(msg->name, buffer);
+		packstr(msg->nodes, buffer);
+		packstr(msg->partition, buffer);
+		pack32(msg->priority, buffer);
+		pack32(msg->req_cpus, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->submit_time, buffer);
+		pack32(msg->uid, buffer);
+	} else if(rpc_version >=3) {
+		packstr(msg->account, buffer);
+		pack32(msg->alloc_cpus, buffer);
+		pack32(msg->assoc_id, buffer);
+		packstr(msg->block_id, buffer);
+		packstr(msg->cluster, buffer);
+		pack32(msg->db_index, buffer);
+		pack_time(msg->eligible_time, buffer);
+		pack32(msg->gid, buffer);
+		pack32(msg->job_id, buffer);
+		pack16(msg->job_state, buffer);
+		packstr(msg->name, buffer);
+		packstr(msg->nodes, buffer);
+		packstr(msg->partition, buffer);
+		pack32(msg->priority, buffer);
+		pack32(msg->req_cpus, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->submit_time, buffer);
+		pack32(msg->uid, buffer);		
+	}
 }
 
 int inline 
-slurmdbd_unpack_job_start_msg(dbd_job_start_msg_t **msg, Buf buffer)
+slurmdbd_unpack_job_start_msg(uint16_t rpc_version,
+			      dbd_job_start_msg_t **msg, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	dbd_job_start_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_start_msg_t));
 	*msg = msg_ptr;
-	safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
-	safe_unpack32(&msg_ptr->alloc_cpus, buffer);
-	safe_unpack32(&msg_ptr->assoc_id, buffer);
-	safe_unpackstr_xmalloc(&msg_ptr->block_id, &uint32_tmp, buffer);
-	safe_unpack32(&msg_ptr->db_index, buffer);
-	safe_unpack_time(&msg_ptr->eligible_time, buffer);
-	safe_unpack32(&msg_ptr->gid, buffer);
-	safe_unpack32(&msg_ptr->job_id, buffer);
-	safe_unpack16(&msg_ptr->job_state, buffer);
-	safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&msg_ptr->partition, &uint32_tmp, buffer);
-	safe_unpack32(&msg_ptr->priority, buffer);
-	safe_unpack32(&msg_ptr->req_cpus, buffer);
-	safe_unpack_time(&msg_ptr->start_time, buffer);
-	safe_unpack_time(&msg_ptr->submit_time, buffer);
-	safe_unpack32(&msg_ptr->uid, buffer);
+
+	if(rpc_version < 3) {
+		safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->alloc_cpus, buffer);
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->block_id, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack_time(&msg_ptr->eligible_time, buffer);
+		safe_unpack32(&msg_ptr->gid, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpack16(&msg_ptr->job_state, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->partition,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->priority, buffer);
+		safe_unpack32(&msg_ptr->req_cpus, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->submit_time, buffer);
+		safe_unpack32(&msg_ptr->uid, buffer);
+	} else if(rpc_version >= 3) {
+		safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->alloc_cpus, buffer);
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->block_id, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->cluster, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack_time(&msg_ptr->eligible_time, buffer);
+		safe_unpack32(&msg_ptr->gid, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpack16(&msg_ptr->job_state, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->partition,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->priority, buffer);
+		safe_unpack32(&msg_ptr->req_cpus, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->submit_time, buffer);
+		safe_unpack32(&msg_ptr->uid, buffer);	
+	}
+	
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_job_start_msg(msg_ptr);
+	slurmdbd_free_job_start_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_job_start_rc_msg(dbd_job_start_rc_msg_t *msg, Buf buffer)
+slurmdbd_pack_job_start_rc_msg(uint16_t rpc_version, 
+			       dbd_job_start_rc_msg_t *msg, Buf buffer)
 {
 	pack32(msg->db_index, buffer);
 	pack32(msg->return_code, buffer);
 }
 
 int inline 
-slurmdbd_unpack_job_start_rc_msg(dbd_job_start_rc_msg_t **msg, Buf buffer)
+slurmdbd_unpack_job_start_rc_msg(uint16_t rpc_version, 
+				 dbd_job_start_rc_msg_t **msg, Buf buffer)
 {
 	dbd_job_start_rc_msg_t *msg_ptr = 
 		xmalloc(sizeof(dbd_job_start_rc_msg_t));
@@ -2351,13 +2532,14 @@ slurmdbd_unpack_job_start_rc_msg(dbd_job_start_rc_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_job_start_rc_msg(msg_ptr);
+	slurmdbd_free_job_start_rc_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg, Buf buffer)
+slurmdbd_pack_job_suspend_msg(uint16_t rpc_version,
+			      dbd_job_suspend_msg_t *msg, Buf buffer)
 {
 	pack32(msg->assoc_id, buffer);
 	pack32(msg->db_index, buffer);
@@ -2368,7 +2550,8 @@ slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg, Buf buffer)
 }
 
 int inline 
-slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg, Buf buffer)
+slurmdbd_unpack_job_suspend_msg(uint16_t rpc_version,
+				dbd_job_suspend_msg_t **msg, Buf buffer)
 {
 	dbd_job_suspend_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_suspend_msg_t));
 	*msg = msg_ptr;
@@ -2381,18 +2564,19 @@ slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_job_suspend_msg(msg_ptr);
+	slurmdbd_free_job_suspend_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
-void inline slurmdbd_pack_list_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_pack_list_msg(uint16_t rpc_version,
+				   slurmdbd_msg_type_t type,
 				   dbd_list_msg_t *msg, Buf buffer)
 {
 	uint32_t count = 0;
 	ListIterator itr = NULL;
 	void *object = NULL;
-	void (*my_function) (void *object, Buf buffer);
+	void (*my_function) (void *object, uint16_t rpc_version, Buf buffer);
 
 	switch(type) {
 	case DBD_ADD_ACCOUNTS:
@@ -2441,20 +2625,21 @@ void inline slurmdbd_pack_list_msg(slurmdbd_msg_type_t type,
 	if(count) {
 		itr = list_iterator_create(msg->my_list);
 		while((object = list_next(itr))) {
-			(*(my_function))(object, buffer);
+			(*(my_function))(object, rpc_version, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
 }
 
-int inline slurmdbd_unpack_list_msg(slurmdbd_msg_type_t type,
+int inline slurmdbd_unpack_list_msg(uint16_t rpc_version, 
+				    slurmdbd_msg_type_t type,
 				    dbd_list_msg_t **msg, Buf buffer)
 {
 	int i;
 	uint32_t count;
 	dbd_list_msg_t *msg_ptr = NULL;
 	void *object = NULL;
-	int (*my_function) (void **object, Buf buffer);
+	int (*my_function) (void **object, uint16_t rpc_version, Buf buffer);
 	void (*my_destroy) (void *object);
 
 	switch(type) {
@@ -2514,7 +2699,8 @@ int inline slurmdbd_unpack_list_msg(slurmdbd_msg_type_t type,
 		*/
 		msg_ptr->my_list = list_create((*(my_destroy)));
 		for(i=0; i<count; i++) {
-			if(((*(my_function))(&object, buffer)) == SLURM_ERROR)
+			if(((*(my_function))(&object, rpc_version, buffer))
+			   == SLURM_ERROR)
 				goto unpack_error;
 			list_append(msg_ptr->my_list, object);
 		}
@@ -2522,16 +2708,17 @@ int inline slurmdbd_unpack_list_msg(slurmdbd_msg_type_t type,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_list_msg(msg_ptr);
+	slurmdbd_free_list_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
-void inline slurmdbd_pack_modify_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_pack_modify_msg(uint16_t rpc_version,
+				     slurmdbd_msg_type_t type,
 				     dbd_modify_msg_t *msg, Buf buffer)
 {
-	void (*my_cond) (void *object, Buf buffer);
-	void (*my_rec) (void *object, Buf buffer);
+	void (*my_cond) (void *object, uint16_t rpc_version, Buf buffer);
+	void (*my_rec) (void *object, uint16_t rpc_version, Buf buffer);
 
 	switch(type) {
 	case DBD_MODIFY_ACCOUNTS:
@@ -2554,16 +2741,17 @@ void inline slurmdbd_pack_modify_msg(slurmdbd_msg_type_t type,
 		fatal("Unknown pack type");
 		return;
 	}
-	(*(my_cond))(msg->cond, buffer);
-	(*(my_rec))(msg->rec, buffer);
+	(*(my_cond))(msg->cond, rpc_version, buffer);
+	(*(my_rec))(msg->rec, rpc_version, buffer);
 }
 
-int inline slurmdbd_unpack_modify_msg(slurmdbd_msg_type_t type,
+int inline slurmdbd_unpack_modify_msg(uint16_t rpc_version, 
+				      slurmdbd_msg_type_t type,
 				      dbd_modify_msg_t **msg, Buf buffer)
 {
 	dbd_modify_msg_t *msg_ptr = NULL;
-	int (*my_cond) (void **object, Buf buffer);
-	int (*my_rec) (void **object, Buf buffer);
+	int (*my_cond) (void **object, uint16_t rpc_version, Buf buffer);
+	int (*my_rec) (void **object, uint16_t rpc_version, Buf buffer);
 
 	msg_ptr = xmalloc(sizeof(dbd_modify_msg_t));
 	*msg = msg_ptr;
@@ -2590,21 +2778,22 @@ int inline slurmdbd_unpack_modify_msg(slurmdbd_msg_type_t type,
 		return SLURM_ERROR;
 	}
 
-	if((*(my_cond))(&msg_ptr->cond, buffer) == SLURM_ERROR)
+	if((*(my_cond))(&msg_ptr->cond, rpc_version, buffer) == SLURM_ERROR)
 		goto unpack_error;
-	if((*(my_rec))(&msg_ptr->rec, buffer) == SLURM_ERROR)
+	if((*(my_rec))(&msg_ptr->rec, rpc_version, buffer) == SLURM_ERROR)
 		goto unpack_error;
 	
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_modify_msg(type, msg_ptr);
+	slurmdbd_free_modify_msg(rpc_version, type, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg, Buf buffer)
+slurmdbd_pack_node_state_msg(uint16_t rpc_version,
+			     dbd_node_state_msg_t *msg, Buf buffer)
 {
 	packstr(msg->cluster_name, buffer);
 	pack32(msg->cpu_count, buffer);
@@ -2615,7 +2804,8 @@ slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg, Buf buffer)
 }
 
 int inline
-slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg, Buf buffer)
+slurmdbd_unpack_node_state_msg(uint16_t rpc_version,
+			       dbd_node_state_msg_t **msg, Buf buffer)
 {
 	dbd_node_state_msg_t *msg_ptr;
 	uint32_t uint32_tmp;
@@ -2631,13 +2821,14 @@ slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_node_state_msg(msg_ptr);
+	slurmdbd_free_node_state_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg, Buf buffer)
+slurmdbd_pack_rc_msg(uint16_t rpc_version,
+		     dbd_rc_msg_t *msg, Buf buffer)
 {
 	packstr(msg->comment, buffer);
 	pack32(msg->return_code, buffer);
@@ -2645,7 +2836,8 @@ slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg, Buf buffer)
 }
 
 int inline 
-slurmdbd_unpack_rc_msg(dbd_rc_msg_t **msg, Buf buffer)
+slurmdbd_unpack_rc_msg(uint16_t rpc_version,
+		       dbd_rc_msg_t **msg, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	dbd_rc_msg_t *msg_ptr = xmalloc(sizeof(dbd_rc_msg_t));
@@ -2656,20 +2848,22 @@ slurmdbd_unpack_rc_msg(dbd_rc_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_rc_msg(msg_ptr);
+	slurmdbd_free_rc_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg, Buf buffer)
+slurmdbd_pack_register_ctld_msg(uint16_t rpc_version,
+				dbd_register_ctld_msg_t *msg, Buf buffer)
 {
 	packstr(msg->cluster_name, buffer);
 	pack16(msg->port, buffer);
 }
 
 int inline 
-slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg, Buf buffer)
+slurmdbd_unpack_register_ctld_msg(uint16_t rpc_version,
+				  dbd_register_ctld_msg_t **msg, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	dbd_register_ctld_msg_t *msg_ptr = xmalloc(
@@ -2680,19 +2874,21 @@ slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_register_ctld_msg(msg_ptr);
+	slurmdbd_free_register_ctld_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg, Buf buffer)
+slurmdbd_pack_roll_usage_msg(uint16_t rpc_version,
+			     dbd_roll_usage_msg_t *msg, Buf buffer)
 {
 	pack_time(msg->start, buffer);
 }
 
 int inline 
-slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg, Buf buffer)
+slurmdbd_unpack_roll_usage_msg(uint16_t rpc_version,
+			       dbd_roll_usage_msg_t **msg, Buf buffer)
 {
 	dbd_roll_usage_msg_t *msg_ptr = xmalloc(sizeof(dbd_roll_usage_msg_t));
 
@@ -2701,13 +2897,14 @@ slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 	
 unpack_error:
-	slurmdbd_free_roll_usage_msg(msg_ptr);
+	slurmdbd_free_roll_usage_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg, Buf buffer)
+slurmdbd_pack_step_complete_msg(uint16_t rpc_version,
+				dbd_step_comp_msg_t *msg, Buf buffer)
 {
 	pack32(msg->assoc_id, buffer);
 	pack32(msg->db_index, buffer);
@@ -2723,7 +2920,8 @@ slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg, Buf buffer)
 }
 
 int inline 
-slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg, Buf buffer)
+slurmdbd_unpack_step_complete_msg(uint16_t rpc_version,
+				  dbd_step_comp_msg_t **msg, Buf buffer)
 {
 	dbd_step_comp_msg_t *msg_ptr = xmalloc(sizeof(dbd_step_comp_msg_t));
 	*msg = msg_ptr;
@@ -2741,13 +2939,14 @@ slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_step_complete_msg(msg_ptr);
+	slurmdbd_free_step_complete_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
 void inline 
-slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg, Buf buffer)
+slurmdbd_pack_step_start_msg(uint16_t rpc_version, dbd_step_start_msg_t *msg,
+			     Buf buffer)
 {
 	pack32(msg->assoc_id, buffer);
 	pack32(msg->db_index, buffer);
@@ -2761,7 +2960,8 @@ slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg, Buf buffer)
 }
 
 int inline 
-slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg, Buf buffer)
+slurmdbd_unpack_step_start_msg(uint16_t rpc_version,
+			       dbd_step_start_msg_t **msg, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	dbd_step_start_msg_t *msg_ptr = xmalloc(sizeof(dbd_step_start_msg_t));
@@ -2778,15 +2978,16 @@ slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_step_start_msg(msg_ptr);
+	slurmdbd_free_step_start_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
 
-void inline slurmdbd_pack_usage_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_pack_usage_msg(uint16_t rpc_version,
+				    slurmdbd_msg_type_t type,
 				    dbd_usage_msg_t *msg, Buf buffer)
 {
-	void (*my_rec) (void *object, Buf buffer);
+	void (*my_rec) (void *object, uint16_t rpc_version, Buf buffer);
 
 	switch(type) {
 	case DBD_GET_ASSOC_USAGE:
@@ -2802,16 +3003,17 @@ void inline slurmdbd_pack_usage_msg(slurmdbd_msg_type_t type,
 		return;
 	}
 	
-	(*(my_rec))(msg->rec, buffer);
+	(*(my_rec))(msg->rec, rpc_version, buffer);
 	pack_time(msg->start, buffer);
 	pack_time(msg->end, buffer);
 }
 
-int inline slurmdbd_unpack_usage_msg(slurmdbd_msg_type_t type,
+int inline slurmdbd_unpack_usage_msg(uint16_t rpc_version,
+				     slurmdbd_msg_type_t type,
 				     dbd_usage_msg_t **msg, Buf buffer)
 {
 	dbd_usage_msg_t *msg_ptr = NULL;
-	int (*my_rec) (void **object, Buf buffer);
+	int (*my_rec) (void **object, uint16_t rpc_version, Buf buffer);
 
 	msg_ptr = xmalloc(sizeof(dbd_usage_msg_t));
 	*msg = msg_ptr;
@@ -2830,7 +3032,7 @@ int inline slurmdbd_unpack_usage_msg(slurmdbd_msg_type_t type,
 		return SLURM_ERROR;
 	}
 
-	if((*(my_rec))(&msg_ptr->rec, buffer) == SLURM_ERROR)
+	if((*(my_rec))(&msg_ptr->rec, rpc_version, buffer) == SLURM_ERROR)
 		goto unpack_error;
 
 	unpack_time(&msg_ptr->start, buffer);
@@ -2840,7 +3042,7 @@ int inline slurmdbd_unpack_usage_msg(slurmdbd_msg_type_t type,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_usage_msg(type, msg_ptr);
+	slurmdbd_free_usage_msg(rpc_version, type, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
diff --git a/src/common/slurmdbd_defs.h b/src/common/slurmdbd_defs.h
index 4ddc04264..b59f2509d 100644
--- a/src/common/slurmdbd_defs.h
+++ b/src/common/slurmdbd_defs.h
@@ -84,7 +84,7 @@
  *	communicating with it (e.g. it will not accept messages with a
  *	version higher than SLURMDBD_VERSION).
  */
-#define SLURMDBD_VERSION	02
+#define SLURMDBD_VERSION	03
 #define SLURMDBD_VERSION_MIN	02
 
 /* SLURM DBD message types */
@@ -142,7 +142,8 @@ typedef enum {
 	DBD_ADD_QOS,		/* Add QOS information   	        */
 	DBD_GET_QOS,		/* Get QOS information   	        */
 	DBD_GOT_QOS,		/* Got QOS information   	        */
-	DBD_REMOVE_QOS		/* Remove QOS information   	        */
+	DBD_REMOVE_QOS,		/* Remove QOS information   	        */
+	DBD_MODIFY_QOS,         /* Modify existing QOS                  */
 } slurmdbd_msg_type_t;
 
 /*****************************************************************************\
@@ -224,6 +225,7 @@ typedef struct dbd_job_start_msg {
 				 * with associations */
 	uint32_t alloc_cpus;	/* count of allocated processors */
 	uint32_t assoc_id;	/* accounting association id */
+	char *   cluster;       /* cluster the job is being run on */
 	char *   block_id;      /* Bluegene block id */
 	uint32_t db_index;	/* index into the db for this job */
 	time_t   eligible_time;	/* time job becomes eligible to run */
@@ -317,6 +319,10 @@ typedef struct dbd_step_start_msg {
 	uint32_t total_procs;	/* count of allocated processors */
 } dbd_step_start_msg_t;
 
+/* flag to let us know if we are running from the cache or from the
+ * actual database */
+extern bool running_cache;
+
 /*****************************************************************************\
  * Slurm DBD message processing functions
 \*****************************************************************************/
@@ -337,22 +343,26 @@ extern int slurm_close_slurmdbd_conn();
  * NOTE: slurm_open_slurmdbd_conn() must have been called with make_agent set
  * 
  * Returns SLURM_SUCCESS or an error code */
-extern int slurm_send_slurmdbd_msg(slurmdbd_msg_t *req);
+extern int slurm_send_slurmdbd_msg(uint16_t rpc_version,
+				   slurmdbd_msg_t *req);
 
 /* Send an RPC to the SlurmDBD and wait for an arbitrary reply message.
  * The RPC will not be queued if an error occurs.
  * The "resp" message must be freed by the caller.
  * Returns SLURM_SUCCESS or an error code */
-extern int slurm_send_recv_slurmdbd_msg(slurmdbd_msg_t *req, 
+extern int slurm_send_recv_slurmdbd_msg(uint16_t rpc_version,
+					slurmdbd_msg_t *req, 
 					slurmdbd_msg_t *resp);
 
 /* Send an RPC to the SlurmDBD and wait for the return code reply.
  * The RPC will not be queued if an error occurs.
  * Returns SLURM_SUCCESS or an error code */
-extern int slurm_send_slurmdbd_recv_rc_msg(slurmdbd_msg_t *req, int *rc);
+extern int slurm_send_slurmdbd_recv_rc_msg(uint16_t rpc_version,
+					   slurmdbd_msg_t *req, int *rc);
 
-extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req);
-extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp, Buf buffer);
+extern Buf pack_slurmdbd_msg(uint16_t rpc_version, slurmdbd_msg_t *req);
+extern int unpack_slurmdbd_msg(uint16_t rpc_version, 
+			       slurmdbd_msg_t *resp, Buf buffer);
 
 extern slurmdbd_msg_type_t str_2_slurmdbd_msg_type(char *msg_type);
 extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type,
@@ -361,104 +371,161 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type,
 /*****************************************************************************\
  * Free various SlurmDBD message structures
 \*****************************************************************************/
-void inline slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg);
-void inline slurmdbd_free_cluster_procs_msg(dbd_cluster_procs_msg_t *msg);
-void inline slurmdbd_free_cond_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_free_acct_coord_msg(uint16_t rpc_version, 
+					 dbd_acct_coord_msg_t *msg);
+void inline slurmdbd_free_cluster_procs_msg(uint16_t rpc_version, 
+					    dbd_cluster_procs_msg_t *msg);
+void inline slurmdbd_free_cond_msg(uint16_t rpc_version, 
+				   slurmdbd_msg_type_t type,
 				   dbd_cond_msg_t *msg);
-void inline slurmdbd_free_get_jobs_msg(dbd_get_jobs_msg_t *msg);
-void inline slurmdbd_free_init_msg(dbd_init_msg_t *msg);
-void inline slurmdbd_free_fini_msg(dbd_fini_msg_t *msg);
-void inline slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg);
-void inline slurmdbd_free_job_start_msg(dbd_job_start_msg_t *msg);
-void inline slurmdbd_free_job_start_rc_msg(dbd_job_start_rc_msg_t *msg);
-void inline slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg);
-void inline slurmdbd_free_list_msg(dbd_list_msg_t *msg);
-void inline slurmdbd_free_modify_msg(slurmdbd_msg_type_t type,
-				      dbd_modify_msg_t *msg);
-void inline slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg);
-void inline slurmdbd_free_rc_msg(dbd_rc_msg_t *msg);
-void inline slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg);
-void inline slurmdbd_free_roll_usage_msg(dbd_roll_usage_msg_t *msg);
-void inline slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg);
-void inline slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg);
-void inline slurmdbd_free_usage_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_free_get_jobs_msg(uint16_t rpc_version, 
+				       dbd_get_jobs_msg_t *msg);
+void inline slurmdbd_free_init_msg(uint16_t rpc_version, 
+				   dbd_init_msg_t *msg);
+void inline slurmdbd_free_fini_msg(uint16_t rpc_version, 
+				   dbd_fini_msg_t *msg);
+void inline slurmdbd_free_job_complete_msg(uint16_t rpc_version, 
+					   dbd_job_comp_msg_t *msg);
+void inline slurmdbd_free_job_start_msg(uint16_t rpc_version, 
+					dbd_job_start_msg_t *msg);
+void inline slurmdbd_free_job_start_rc_msg(uint16_t rpc_version, 
+					   dbd_job_start_rc_msg_t *msg);
+void inline slurmdbd_free_job_suspend_msg(uint16_t rpc_version, 
+					  dbd_job_suspend_msg_t *msg);
+void inline slurmdbd_free_list_msg(uint16_t rpc_version, 
+				   dbd_list_msg_t *msg);
+void inline slurmdbd_free_modify_msg(uint16_t rpc_version, 
+				     slurmdbd_msg_type_t type,
+				     dbd_modify_msg_t *msg);
+void inline slurmdbd_free_node_state_msg(uint16_t rpc_version, 
+					 dbd_node_state_msg_t *msg);
+void inline slurmdbd_free_rc_msg(uint16_t rpc_version, 
+				 dbd_rc_msg_t *msg);
+void inline slurmdbd_free_register_ctld_msg(uint16_t rpc_version, 
+					    dbd_register_ctld_msg_t *msg);
+void inline slurmdbd_free_roll_usage_msg(uint16_t rpc_version, 
+					 dbd_roll_usage_msg_t *msg);
+void inline slurmdbd_free_step_complete_msg(uint16_t rpc_version, 
+					    dbd_step_comp_msg_t *msg);
+void inline slurmdbd_free_step_start_msg(uint16_t rpc_version, 
+					 dbd_step_start_msg_t *msg);
+void inline slurmdbd_free_usage_msg(uint16_t rpc_version, 
+				    slurmdbd_msg_type_t type,
 				    dbd_usage_msg_t *msg);
 
 /*****************************************************************************\
  * Pack various SlurmDBD message structures into a buffer
 \*****************************************************************************/
-void inline slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg,
-					  Buf buffer);
-void inline slurmdbd_pack_cluster_procs_msg(dbd_cluster_procs_msg_t *msg,
-					     Buf buffer);
-void inline slurmdbd_pack_cond_msg(slurmdbd_msg_type_t type,
-				    dbd_cond_msg_t *msg, Buf buffer);
-void inline slurmdbd_pack_get_jobs_msg(dbd_get_jobs_msg_t *msg, Buf buffer);
-void inline slurmdbd_pack_init_msg(dbd_init_msg_t *msg, Buf buffer,
-				    char *auth_info);
-void inline slurmdbd_pack_fini_msg(dbd_fini_msg_t *msg, Buf buffer);
-void inline slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg,
-					    Buf buffer);
-void inline slurmdbd_pack_job_start_msg(dbd_job_start_msg_t *msg,
+void inline slurmdbd_pack_acct_coord_msg(uint16_t rpc_version, 
+					 dbd_acct_coord_msg_t *msg,
 					 Buf buffer);
-void inline slurmdbd_pack_job_start_rc_msg(dbd_job_start_rc_msg_t *msg,
+void inline slurmdbd_pack_cluster_procs_msg(uint16_t rpc_version, 
+					    dbd_cluster_procs_msg_t *msg,
 					    Buf buffer);
-void inline slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg,
+void inline slurmdbd_pack_cond_msg(uint16_t rpc_version, 
+				   slurmdbd_msg_type_t type,
+				   dbd_cond_msg_t *msg, Buf buffer);
+void inline slurmdbd_pack_get_jobs_msg(uint16_t rpc_version, 
+				       dbd_get_jobs_msg_t *msg, Buf buffer);
+void inline slurmdbd_pack_init_msg(uint16_t rpc_version, 
+				   dbd_init_msg_t *msg, Buf buffer,
+				   char *auth_info);
+void inline slurmdbd_pack_fini_msg(uint16_t rpc_version, 
+				   dbd_fini_msg_t *msg, Buf buffer);
+void inline slurmdbd_pack_job_complete_msg(uint16_t rpc_version, 
+					   dbd_job_comp_msg_t *msg,
 					   Buf buffer);
-void inline slurmdbd_pack_list_msg(slurmdbd_msg_type_t type,
-				    dbd_list_msg_t *msg, Buf buffer);
-void inline slurmdbd_pack_modify_msg(slurmdbd_msg_type_t type,
-				      dbd_modify_msg_t *msg, Buf buffer);
-void inline slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg,
+void inline slurmdbd_pack_job_start_msg(uint16_t rpc_version, 
+					dbd_job_start_msg_t *msg,
+					Buf buffer);
+void inline slurmdbd_pack_job_start_rc_msg(uint16_t rpc_version, 
+					   dbd_job_start_rc_msg_t *msg,
+					   Buf buffer);
+void inline slurmdbd_pack_job_suspend_msg(uint16_t rpc_version, 
+					  dbd_job_suspend_msg_t *msg,
 					  Buf buffer);
-void inline slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg, Buf buffer);
-void inline slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg,
+void inline slurmdbd_pack_list_msg(uint16_t rpc_version, 
+				   slurmdbd_msg_type_t type,
+				   dbd_list_msg_t *msg, Buf buffer);
+void inline slurmdbd_pack_modify_msg(uint16_t rpc_version, 
+				     slurmdbd_msg_type_t type,
+				     dbd_modify_msg_t *msg, Buf buffer);
+void inline slurmdbd_pack_node_state_msg(uint16_t rpc_version, 
+					 dbd_node_state_msg_t *msg,
+					 Buf buffer);
+void inline slurmdbd_pack_rc_msg(uint16_t rpc_version, 
+				 dbd_rc_msg_t *msg, Buf buffer);
+void inline slurmdbd_pack_register_ctld_msg(uint16_t rpc_version, 
+					    dbd_register_ctld_msg_t *msg,
 					    Buf buffer);
-void inline slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg, Buf buffer);
-void inline slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg,
-					     Buf buffer);
-void inline slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg,
-					  Buf buffer);
-void inline slurmdbd_pack_usage_msg(slurmdbd_msg_type_t type,
+void inline slurmdbd_pack_roll_usage_msg(uint16_t rpc_version, 
+					 dbd_roll_usage_msg_t *msg, Buf buffer);
+void inline slurmdbd_pack_step_complete_msg(uint16_t rpc_version, 
+					    dbd_step_comp_msg_t *msg,
+					    Buf buffer);
+void inline slurmdbd_pack_step_start_msg(uint16_t rpc_version, 
+					 dbd_step_start_msg_t *msg,
+					 Buf buffer);
+void inline slurmdbd_pack_usage_msg(uint16_t rpc_version, 
+				    slurmdbd_msg_type_t type,
 				    dbd_usage_msg_t *msg, Buf buffer);
 
 /*****************************************************************************\
  * Unpack various SlurmDBD message structures from a buffer
 \*****************************************************************************/
-int inline slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg,
-					   Buf buffer);
-int inline slurmdbd_unpack_cluster_procs_msg(dbd_cluster_procs_msg_t **msg,
-					      Buf buffer);
-int inline slurmdbd_unpack_cond_msg(slurmdbd_msg_type_t type,
-				     dbd_cond_msg_t **msg, Buf buffer);
-int inline slurmdbd_unpack_get_jobs_msg(dbd_get_jobs_msg_t **msg, Buf buffer);
-int inline slurmdbd_unpack_init_msg(dbd_init_msg_t **msg, Buf buffer,
-				    char *auth_info);
-int inline slurmdbd_unpack_fini_msg(dbd_fini_msg_t **msg, Buf buffer);
-int inline slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg,
-					     Buf buffer);
-int inline slurmdbd_unpack_job_start_msg(dbd_job_start_msg_t **msg,
+int inline slurmdbd_unpack_acct_coord_msg(uint16_t rpc_version, 
+					  dbd_acct_coord_msg_t **msg,
 					  Buf buffer);
-int inline slurmdbd_unpack_job_start_rc_msg(dbd_job_start_rc_msg_t **msg,
+int inline slurmdbd_unpack_cluster_procs_msg(uint16_t rpc_version, 
+					     dbd_cluster_procs_msg_t **msg,
 					     Buf buffer);
-int inline slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg,
+int inline slurmdbd_unpack_cond_msg(uint16_t rpc_version, 
+				    slurmdbd_msg_type_t type,
+				    dbd_cond_msg_t **msg, Buf buffer);
+int inline slurmdbd_unpack_get_jobs_msg(uint16_t rpc_version, 
+					dbd_get_jobs_msg_t **msg, Buf buffer);
+int inline slurmdbd_unpack_init_msg(uint16_t rpc_version, 
+				    dbd_init_msg_t **msg, Buf buffer,
+				    char *auth_info);
+int inline slurmdbd_unpack_fini_msg(uint16_t rpc_version, 
+				    dbd_fini_msg_t **msg, Buf buffer);
+int inline slurmdbd_unpack_job_complete_msg(uint16_t rpc_version, 
+					    dbd_job_comp_msg_t **msg,
+					    Buf buffer);
+int inline slurmdbd_unpack_job_start_msg(uint16_t rpc_version, 
+					 dbd_job_start_msg_t **msg,
+					 Buf buffer);
+int inline slurmdbd_unpack_job_start_rc_msg(uint16_t rpc_version, 
+					    dbd_job_start_rc_msg_t **msg,
 					    Buf buffer);
-int inline slurmdbd_unpack_list_msg(slurmdbd_msg_type_t type,
-				     dbd_list_msg_t **msg, Buf buffer);
-int inline slurmdbd_unpack_modify_msg(slurmdbd_msg_type_t type,
-				       dbd_modify_msg_t **msg, Buf buffer);
-int inline slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg,
+int inline slurmdbd_unpack_job_suspend_msg(uint16_t rpc_version, 
+					   dbd_job_suspend_msg_t **msg,
 					   Buf buffer);
-int inline slurmdbd_unpack_rc_msg(dbd_rc_msg_t **msg, Buf buffer);
-int inline slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg, 
+int inline slurmdbd_unpack_list_msg(uint16_t rpc_version, 
+				    slurmdbd_msg_type_t type,
+				    dbd_list_msg_t **msg, Buf buffer);
+int inline slurmdbd_unpack_modify_msg(uint16_t rpc_version, 
+				      slurmdbd_msg_type_t type,
+				      dbd_modify_msg_t **msg, Buf buffer);
+int inline slurmdbd_unpack_node_state_msg(uint16_t rpc_version, 
+					  dbd_node_state_msg_t **msg,
+					  Buf buffer);
+int inline slurmdbd_unpack_rc_msg(uint16_t rpc_version, 
+				  dbd_rc_msg_t **msg, Buf buffer);
+int inline slurmdbd_unpack_register_ctld_msg(uint16_t rpc_version, 
+					     dbd_register_ctld_msg_t **msg, 
 					     Buf buffer);
-int inline slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg,
+int inline slurmdbd_unpack_roll_usage_msg(uint16_t rpc_version, 
+					  dbd_roll_usage_msg_t **msg,
 					  Buf buffer);
-int inline slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg,
+int inline slurmdbd_unpack_step_complete_msg(uint16_t rpc_version, 
+					     dbd_step_comp_msg_t **msg,
 					     Buf buffer);
-int inline slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg,
+int inline slurmdbd_unpack_step_start_msg(uint16_t rpc_version, 
+					  dbd_step_start_msg_t **msg,
 					  Buf buffer);
-int inline slurmdbd_unpack_usage_msg(slurmdbd_msg_type_t type,
+int inline slurmdbd_unpack_usage_msg(uint16_t rpc_version, 
+				     slurmdbd_msg_type_t type,
 				     dbd_usage_msg_t **msg,
 				     Buf buffer);
 
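
A hedged sketch (not part of the patch) of the sending side after this API change: every send/recv helper now takes the RPC version explicitly. SLURMDBD_VERSION and DBD_MODIFY_QOS come from the header above; the dbd_modify_msg_t field names and the surrounding function are assumptions for illustration.

/* Illustrative sketch: modify an existing QOS through the DBD using
 * the new DBD_MODIFY_QOS message type and the versioned send helper. */
static int _modify_qos_example(void *qos_cond, void *qos_rec)
{
	slurmdbd_msg_t req;
	dbd_modify_msg_t get_msg;
	int rc = SLURM_SUCCESS;

	memset(&get_msg, 0, sizeof(dbd_modify_msg_t));
	get_msg.cond = qos_cond;	/* assumed field names */
	get_msg.rec  = qos_rec;

	req.msg_type = DBD_MODIFY_QOS;
	req.data     = &get_msg;

	/* the RPC version is now passed on every call */
	if (slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION, &req, &rc)
	    != SLURM_SUCCESS)
		return SLURM_ERROR;

	return rc;
}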
diff --git a/src/common/xstring.c b/src/common/xstring.c
index ec6652814..94d0a181d 100644
--- a/src/common/xstring.c
+++ b/src/common/xstring.c
@@ -82,6 +82,7 @@ strong_alias(xstrdup_printf,	slurm_xstrdup_printf);
 strong_alias(xstrndup,		slurm_xstrndup);
 strong_alias(xbasename,		slurm_xbasename);
 strong_alias(_xstrsubstitute,   slurm_xstrsubstitute);
+strong_alias(xstrstrip,         slurm_xstrstrip);
 strong_alias(xshort_hostname,   slurm_xshort_hostname);
 strong_alias(xstring_is_whitespace, slurm_xstring_is_whitespace);
 
@@ -401,6 +402,47 @@ void _xstrsubstitute(char **str, const char *pattern, const char *replacement)
 	xfree(end_copy);
 }
 
+/*
+ * Remove the first pair of quotes ("..." or '...') that surround the
+ *   string "str" and return the result without the quotes
+ *   str (IN)	        target string
+ *   RET char *         copy of "str" with the surrounding quotes
+ *                      removed; must be xfreed by the caller
+ */
+char *xstrstrip(char *str)
+{
+	int i=0, start=0, found = 0;
+	char *meat = NULL;
+	char quote_c = '\0';
+	int quote = 0;
+
+	if(!str)
+		return NULL;
+
+	/* first strip off the ("|')'s */
+	if (str[i] == '\"' || str[i] == '\'') {
+		quote_c = str[i];
+		quote = 1;
+		i++;
+	}
+	start = i;
+
+	while(str[i]) {
+		if(quote && str[i] == quote_c) {
+			found = 1;
+			break;		
+		}
+		i++;
+	}
+	if(found) {
+		meat = xmalloc((i-start)+1);
+		memcpy(meat, str+start, (i-start));
+	} else
+		meat = xstrdup(str);
+	return meat;
+}
+
+
 /* xshort_hostname
  *   Returns an xmalloc'd string containing the hostname
  *   of the local machine.  The hostname contains only
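
A short usage sketch (not part of the patch) for the new xstrstrip() helper added above; the input value is arbitrary.

/* Illustrative sketch: xstrstrip() returns an xmalloc'd copy of its
 * argument with one surrounding pair of quotes removed, so the result
 * must be released with xfree(). */
{
	char quoted[] = "\"normal\"";
	char *meat = xstrstrip(quoted);	/* meat now holds: normal */

	/* ... use meat ... */
	xfree(meat);
}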
diff --git a/src/common/xstring.h b/src/common/xstring.h
index 0345b5adb..493819a49 100644
--- a/src/common/xstring.h
+++ b/src/common/xstring.h
@@ -124,6 +124,14 @@ char *xbasename(char *path);
 */
 void _xstrsubstitute(char **str, const char *pattern, const char *replacement);
 
+/*
+ * Remove the first pair of quotes ("..." or '...') that surround the
+ *   string "str" and return the result without the quotes
+ *   str (IN)	        target string
+ *   RET char *         copy of "str" without the quotes; must be xfreed
+ */
+char *xstrstrip(char *str);
+
 /* xshort_hostname
  *   Returns an xmalloc'd string containing the hostname
  *   of the local machine.  The hostname contains only
diff --git a/src/database/Makefile.am b/src/database/Makefile.am
index 31e2b7c0c..dadae58c3 100644
--- a/src/database/Makefile.am
+++ b/src/database/Makefile.am
@@ -4,27 +4,10 @@ AUTOMAKE_OPTIONS = foreign
 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
-if HAVE_OPENSSL
-
-noinst_LTLIBRARIES =      \
-	libslurm_mysql.la \
-	libslurm_pgsql.la \
-	libslurm_gold.la
-
-libslurm_gold_la_SOURCES = gold_interface.c gold_interface.h \
-	base64.c base64.h
-libslurm_gold_la_LIBADD   = $(SSL_LIBS)
-libslurm_gold_la_LDFLAGS  = $(LIB_LDFLAGS) $(SSL_LDFLAGS)
-libslurm_gold_la_CFLAGS   = $(SSL_CPPFLAGS)
-
-else 
-
 noinst_LTLIBRARIES =      \
 	libslurm_mysql.la \
 	libslurm_pgsql.la
 
-endif
-
 libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h
 libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h
 
diff --git a/src/database/Makefile.in b/src/database/Makefile.in
index 2861f5f4e..6afcf8b7b 100644
--- a/src/database/Makefile.in
+++ b/src/database/Makefile.in
@@ -68,18 +68,6 @@ CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
 CONFIG_CLEAN_FILES =
 LTLIBRARIES = $(noinst_LTLIBRARIES)
 am__DEPENDENCIES_1 =
-@HAVE_OPENSSL_TRUE@libslurm_gold_la_DEPENDENCIES =  \
-@HAVE_OPENSSL_TRUE@	$(am__DEPENDENCIES_1)
-am__libslurm_gold_la_SOURCES_DIST = gold_interface.c gold_interface.h \
-	base64.c base64.h
-@HAVE_OPENSSL_TRUE@am_libslurm_gold_la_OBJECTS =  \
-@HAVE_OPENSSL_TRUE@	libslurm_gold_la-gold_interface.lo \
-@HAVE_OPENSSL_TRUE@	libslurm_gold_la-base64.lo
-libslurm_gold_la_OBJECTS = $(am_libslurm_gold_la_OBJECTS)
-libslurm_gold_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
-	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(libslurm_gold_la_CFLAGS) \
-	$(CFLAGS) $(libslurm_gold_la_LDFLAGS) $(LDFLAGS) -o $@
-@HAVE_OPENSSL_TRUE@am_libslurm_gold_la_rpath =
 libslurm_mysql_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
 am_libslurm_mysql_la_OBJECTS = libslurm_mysql_la-mysql_common.lo
 libslurm_mysql_la_OBJECTS = $(am_libslurm_mysql_la_OBJECTS)
@@ -87,8 +75,6 @@ libslurm_mysql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) \
 	$(libslurm_mysql_la_CFLAGS) $(CFLAGS) \
 	$(libslurm_mysql_la_LDFLAGS) $(LDFLAGS) -o $@
-@HAVE_OPENSSL_FALSE@am_libslurm_mysql_la_rpath =
-@HAVE_OPENSSL_TRUE@am_libslurm_mysql_la_rpath =
 libslurm_pgsql_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
 am_libslurm_pgsql_la_OBJECTS = libslurm_pgsql_la-pgsql_common.lo
 libslurm_pgsql_la_OBJECTS = $(am_libslurm_pgsql_la_OBJECTS)
@@ -96,8 +82,6 @@ libslurm_pgsql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) \
 	$(libslurm_pgsql_la_CFLAGS) $(CFLAGS) \
 	$(libslurm_pgsql_la_LDFLAGS) $(LDFLAGS) -o $@
-@HAVE_OPENSSL_FALSE@am_libslurm_pgsql_la_rpath =
-@HAVE_OPENSSL_TRUE@am_libslurm_pgsql_la_rpath =
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -110,10 +94,9 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(libslurm_gold_la_SOURCES) $(libslurm_mysql_la_SOURCES) \
+SOURCES = $(libslurm_mysql_la_SOURCES) $(libslurm_pgsql_la_SOURCES)
+DIST_SOURCES = $(libslurm_mysql_la_SOURCES) \
 	$(libslurm_pgsql_la_SOURCES)
-DIST_SOURCES = $(am__libslurm_gold_la_SOURCES_DIST) \
-	$(libslurm_mysql_la_SOURCES) $(libslurm_pgsql_la_SOURCES)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -286,21 +269,10 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-@HAVE_OPENSSL_FALSE@noinst_LTLIBRARIES = \
-@HAVE_OPENSSL_FALSE@	libslurm_mysql.la \
-@HAVE_OPENSSL_FALSE@	libslurm_pgsql.la
+noinst_LTLIBRARIES = \
+	libslurm_mysql.la \
+	libslurm_pgsql.la
 
-@HAVE_OPENSSL_TRUE@noinst_LTLIBRARIES = \
-@HAVE_OPENSSL_TRUE@	libslurm_mysql.la \
-@HAVE_OPENSSL_TRUE@	libslurm_pgsql.la \
-@HAVE_OPENSSL_TRUE@	libslurm_gold.la
-
-@HAVE_OPENSSL_TRUE@libslurm_gold_la_SOURCES = gold_interface.c gold_interface.h \
-@HAVE_OPENSSL_TRUE@	base64.c base64.h
-
-@HAVE_OPENSSL_TRUE@libslurm_gold_la_LIBADD = $(SSL_LIBS)
-@HAVE_OPENSSL_TRUE@libslurm_gold_la_LDFLAGS = $(LIB_LDFLAGS) $(SSL_LDFLAGS)
-@HAVE_OPENSSL_TRUE@libslurm_gold_la_CFLAGS = $(SSL_CPPFLAGS)
 libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h
 libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h
 libslurm_mysql_la_LIBADD = $(MYSQL_LIBS) 
@@ -351,12 +323,10 @@ clean-noinstLTLIBRARIES:
 	  echo "rm -f \"$${dir}/so_locations\""; \
 	  rm -f "$${dir}/so_locations"; \
 	done
-libslurm_gold.la: $(libslurm_gold_la_OBJECTS) $(libslurm_gold_la_DEPENDENCIES) 
-	$(libslurm_gold_la_LINK) $(am_libslurm_gold_la_rpath) $(libslurm_gold_la_OBJECTS) $(libslurm_gold_la_LIBADD) $(LIBS)
 libslurm_mysql.la: $(libslurm_mysql_la_OBJECTS) $(libslurm_mysql_la_DEPENDENCIES) 
-	$(libslurm_mysql_la_LINK) $(am_libslurm_mysql_la_rpath) $(libslurm_mysql_la_OBJECTS) $(libslurm_mysql_la_LIBADD) $(LIBS)
+	$(libslurm_mysql_la_LINK)  $(libslurm_mysql_la_OBJECTS) $(libslurm_mysql_la_LIBADD) $(LIBS)
 libslurm_pgsql.la: $(libslurm_pgsql_la_OBJECTS) $(libslurm_pgsql_la_DEPENDENCIES) 
-	$(libslurm_pgsql_la_LINK) $(am_libslurm_pgsql_la_rpath) $(libslurm_pgsql_la_OBJECTS) $(libslurm_pgsql_la_LIBADD) $(LIBS)
+	$(libslurm_pgsql_la_LINK)  $(libslurm_pgsql_la_OBJECTS) $(libslurm_pgsql_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
@@ -364,8 +334,6 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libslurm_gold_la-base64.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libslurm_gold_la-gold_interface.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libslurm_mysql_la-mysql_common.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libslurm_pgsql_la-pgsql_common.Plo@am__quote@
 
@@ -390,20 +358,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
 
-libslurm_gold_la-gold_interface.lo: gold_interface.c
-@am__fastdepCC_TRUE@	$(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_gold_la_CFLAGS) $(CFLAGS) -MT libslurm_gold_la-gold_interface.lo -MD -MP -MF $(DEPDIR)/libslurm_gold_la-gold_interface.Tpo -c -o libslurm_gold_la-gold_interface.lo `test -f 'gold_interface.c' || echo '$(srcdir)/'`gold_interface.c
-@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/libslurm_gold_la-gold_interface.Tpo $(DEPDIR)/libslurm_gold_la-gold_interface.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='gold_interface.c' object='libslurm_gold_la-gold_interface.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_gold_la_CFLAGS) $(CFLAGS) -c -o libslurm_gold_la-gold_interface.lo `test -f 'gold_interface.c' || echo '$(srcdir)/'`gold_interface.c
-
-libslurm_gold_la-base64.lo: base64.c
-@am__fastdepCC_TRUE@	$(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_gold_la_CFLAGS) $(CFLAGS) -MT libslurm_gold_la-base64.lo -MD -MP -MF $(DEPDIR)/libslurm_gold_la-base64.Tpo -c -o libslurm_gold_la-base64.lo `test -f 'base64.c' || echo '$(srcdir)/'`base64.c
-@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/libslurm_gold_la-base64.Tpo $(DEPDIR)/libslurm_gold_la-base64.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='base64.c' object='libslurm_gold_la-base64.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_gold_la_CFLAGS) $(CFLAGS) -c -o libslurm_gold_la-base64.lo `test -f 'base64.c' || echo '$(srcdir)/'`base64.c
-
 libslurm_mysql_la-mysql_common.lo: mysql_common.c
 @am__fastdepCC_TRUE@	$(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_mysql_la_CFLAGS) $(CFLAGS) -MT libslurm_mysql_la-mysql_common.lo -MD -MP -MF $(DEPDIR)/libslurm_mysql_la-mysql_common.Tpo -c -o libslurm_mysql_la-mysql_common.lo `test -f 'mysql_common.c' || echo '$(srcdir)/'`mysql_common.c
 @am__fastdepCC_TRUE@	mv -f $(DEPDIR)/libslurm_mysql_la-mysql_common.Tpo $(DEPDIR)/libslurm_mysql_la-mysql_common.Plo
diff --git a/src/database/base64.c b/src/database/base64.c
deleted file mode 100644
index 92a8d8446..000000000
--- a/src/database/base64.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*****************************************************************************\
- *  base64.c - encoding for communication with gold.
- *
- *  $Id: storage_filetxt.c 10893 2007-01-29 21:53:48Z da $
- *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include "base64.h"
-#include <string.h>
-
-static char basis_64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
-
-/* To tell if char is a valid base64 */
-static int _is_base64(unsigned char c) {
-	if((c >= '/' && c <= '9') 
-	   || (c >= 'A' && c <= 'Z')
-	   || (c >= 'a' && c <= 'z')
-	   || (c == '+')) 
-		return 1;
-	return 0;
-}
-
-/*
- * encode_base64 - given a char * of in_len will return an encoded
- *                 version
- * IN in_str - pointer to string to be encoded
- * IN in_len - string length of in_str
- * RET pointer to encoded string or NULL on failure 
- * NOTE: allocates memory that should be xfreed with xfree.
- */
-extern unsigned char *encode_base64(const unsigned char* in_str, 
-				    unsigned int in_len)
-{
-	unsigned char *ret_str = NULL;
-	int i = 0;
-	int j = 0;
-	unsigned char char_array_3[3];
-	unsigned char char_array_4[4];
-	int pos = 0;
-	/* calculate the length of the result */
-	int rlen = (in_len+2) / 3 * 4;	 /* encoded bytes */
-
-	rlen++; /* for the eol */
-	ret_str = xmalloc(sizeof(unsigned char) * rlen);
-	
-	debug4("encoding %s", in_str);
-
-	while (in_len--) {
-		char_array_3[i++] = *(in_str++);
-		if (i == 3) {
-			char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
-			char_array_4[1] = ((char_array_3[0] & 0x03) << 4)
-				+ ((char_array_3[1] & 0xf0) >> 4);
-			char_array_4[2] = ((char_array_3[1] & 0x0f) << 2)
-				+ ((char_array_3[2] & 0xc0) >> 6);
-			char_array_4[3] = char_array_3[2] & 0x3f;
-			
-			for(i = 0; (i <4) ; i++)
-				ret_str[pos++] = basis_64[char_array_4[i]];
-			i = 0;
-		}
-	}
-	
-	if (i) {
-		for(j = i; j < 3; j++)
-			char_array_3[j] = '\0';
-		
-		char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
-		char_array_4[1] = ((char_array_3[0] & 0x03) << 4)
-			+ ((char_array_3[1] & 0xf0) >> 4);
-		char_array_4[2] = ((char_array_3[1] & 0x0f) << 2)
-			+ ((char_array_3[2] & 0xc0) >> 6);
-		char_array_4[3] = char_array_3[2] & 0x3f;
-		
-		for (j = 0; (j < i + 1); j++)
-			ret_str[pos++] = basis_64[char_array_4[j]];
-		
-		while((i++ < 3))
-			ret_str[pos++] = '=';
-		
-	}
-
-	debug4("encoded %s", ret_str);
-	
-	return ret_str;
-}
-
-/*
- * decode_base64 - given a char * will return a decoded version
- *
- * IN in_str - pointer to string to be decoded
- * RET pointer to decoded string or NULL on failure
- * NOTE: allocates memory that should be xfreed with xfree.
- */
-extern unsigned char *decode_base64(const unsigned char *in_str)
-{
-	int pos = 0;
-	int in_len = strlen((char *)in_str);
-	int i = 0;
-	int j = 0;
-	int in_pos = 0;
-	unsigned char char_array_4[4], char_array_3[3];
-	unsigned char *ret_str = NULL;
-
-	int rlen = in_len * 3 / 4; /* always enough, but sometimes too
-				    * much */
-       	
-	debug4("decoding %s", in_str);
-
-	ret_str = xmalloc(sizeof(unsigned char) * rlen);
-	memset(ret_str, 0, rlen);
-	
-	while (in_len-- && ( in_str[in_pos] != '=')
-	       && _is_base64(in_str[in_pos])) {
-		char_array_4[i++] = in_str[in_pos];
-		in_pos++;
-		if (i == 4) {
-			for (i=0; i<4; i++) {
-				int found = 0;
-				while(basis_64[found] 
-				      && basis_64[found] != char_array_4[i])
-					found++;
-				if(!basis_64[found]) 
-					found = 0;
-				char_array_4[i] = found;
-			}
-			char_array_3[0] = (char_array_4[0] << 2) 
-				+ ((char_array_4[1] & 0x30) >> 4);
-			char_array_3[1] = ((char_array_4[1] & 0xf) << 4) 
-				+ ((char_array_4[2] & 0x3c) >> 2);
-			char_array_3[2] = ((char_array_4[2] & 0x3) << 6)
-				+ char_array_4[3];
-			for (i = 0; i<3; i++)
-				ret_str[pos++] = char_array_3[i];
-			i = 0;
-		}
-	}
-
-	if (i) {
-		for (j=i; j<4; j++)
-			char_array_4[j] = 0;
-
-		for (j=0; j<4; j++) {
-			int found = 0;
-			while(basis_64[found] 
-			      && basis_64[found] != char_array_4[j])
-				found++;
-			if(!basis_64[found]) 
-				found = 0;
-			
-			char_array_4[j] = found;
-		}
-
-		char_array_3[0] = (char_array_4[0] << 2) 
-			+ ((char_array_4[1] & 0x30) >> 4);
-		char_array_3[1] = ((char_array_4[1] & 0xf) << 4)
-			+ ((char_array_4[2] & 0x3c) >> 2);
-		char_array_3[2] = ((char_array_4[2] & 0x3) << 6) 
-			+ char_array_4[3];
-
-		for (j = 0; (j < i - 1); j++)
-			ret_str[pos++] = char_array_3[j];
-	}
-
-	debug4("decoded %s", ret_str);
-
-	return ret_str;
-}
diff --git a/src/database/base64.h b/src/database/base64.h
deleted file mode 100644
index a5a9980d8..000000000
--- a/src/database/base64.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*****************************************************************************\
- *  base64.h - encoding for communication with gold.
- *
- *  $Id: storage_filetxt.c 10893 2007-01-29 21:53:48Z da $
- *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-#ifndef _HAVE_GOLD_BASE64_H
-#define _HAVE_GOLD_BASE64_H
-
-#ifdef HAVE_CONFIG_H
-#  include "config.h"
-#endif
-
-#if HAVE_STDINT_H
-#  include <stdint.h>
-#endif
-#if HAVE_INTTYPES_H
-#  include <inttypes.h>
-#endif
-
-#include "src/common/xmalloc.h"
-
-/*
- * encode_base64 - given a char * of in_len will return an encoded
- *                 version
- * IN in_str - pointer to string to be encoded
- * IN in_len - string length of in_str
- * RET pointer to encoded string 
- * NOTE: allocates memory that should be xfreed with xfree.
- */
-extern unsigned char *encode_base64(const unsigned char *in_str,
-				    unsigned int in_len);
-
-/*
- * decode_base64 - given a char * will return a decoded version
- *
- * IN in_str - pointer to string to be decoded
- * RET pointer to decoded string
- * NOTE: allocates memory that should be xfreed with xfree.
- */
-extern unsigned char *decode_base64(const unsigned char *in_str);
-
-#endif
diff --git a/src/database/gold_interface.c b/src/database/gold_interface.c
deleted file mode 100644
index 080f4e495..000000000
--- a/src/database/gold_interface.c
+++ /dev/null
@@ -1,623 +0,0 @@
-/*****************************************************************************\
- *  gold_interface.h - interface to the gold daemon commands.
- *
- *  $Id: storage_filetxt.c 10893 2007-01-29 21:53:48Z da $
- *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include "gold_interface.h"
-#include "base64.h"
-
-#include <openssl/hmac.h>
-#include <openssl/sha.h>
-
-#include "src/common/slurm_protocol_interface.h"
-#include "src/common/slurm_protocol_api.h"
-#include "src/common/uid.h"
-
-#define MAX_RETRY 5
-
-/* This should be updated to match the gold_object_t enum */
-char *GOLD_OBJECT_STR[] = {
-	"Account", 
-	"User", 
-	"Project", 
-	"Machine", 
-	"Job", 
-	"RoleUser", 
-	"EventLog",
-	"MachineHourUsage", 
-	"MachineDayUsage", 
-	"MachineMonthUsage",
-	"AccountHourUsage", 
-	"AccountDayUsage", 
-	"AccountMonthUsage",
-	NULL
-};
-
-static char *gold_key = NULL;
-static char *gold_host = NULL;
-static uint16_t gold_port = 0;
-static int gold_init = 0;
-pthread_mutex_t gold_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static char *_get_return_value(char *gold_msg, int *i)
-{
-	char tmp_buff[256];
-	int j=0;
-	int pos = (*i);
-
-	while(gold_msg[pos] && j < 256) {
-		if(gold_msg[pos] == '<') 
-			break;
-		
-		tmp_buff[j++] = gold_msg[pos++];
-	}
-
-	while(gold_msg[pos] && gold_msg[pos] != '>')
-		pos++;
-
-	(*i) = pos;
-
-	tmp_buff[j] = '\0';
-	return xstrdup(tmp_buff);
-}
-
-static char *_get_return_name(char *gold_msg, int *i)
-{
-	char tmp_buff[256];
-	int j=0;
-	int pos = *i;
-
-	while(gold_msg[pos] && j < 256) {
-		if(gold_msg[pos] == '>') 
-			break;
-		
-		tmp_buff[j++] = gold_msg[pos++];
-	}
-
-	(*i) = pos+1;
-
-	tmp_buff[j] = '\0';
-	return xstrdup(tmp_buff);
-}
-
-static gold_response_entry_t *_create_response_entry(char *object,
-						     char *gold_msg, int *i) 
-{
-	gold_response_entry_t *resp_entry =
-		xmalloc(sizeof(gold_response_entry_t));
-	gold_name_value_t *name_val = NULL;
-	int olen = strlen(object);
-
-	/* FIXME: we might want to check if the last char was a < to
-	 * add this if it is 
-	 */
-	(*i) += (olen + 1); //assume what is coming in is the name
-	resp_entry->name_val = list_create(destroy_gold_name_value);
-	while(gold_msg[*i]) {
-		if(!strncmp(gold_msg+(*i), object, olen)) {
-			(*i) += (olen + 1); //get to the end of the object
-			break;
-		} else if(gold_msg[(*i)] == '<' && gold_msg[(*i)+1] != '/') {
-			// found the front of a selection
-			(*i)++;
-			
-			name_val = xmalloc(sizeof(gold_name_value_t));
-			name_val->name = _get_return_name(gold_msg, i);
-			name_val->value = _get_return_value(gold_msg, i);
-			
-			debug4("got %s = %s", name_val->name, name_val->value);
-			list_append(resp_entry->name_val, name_val);
-		}
-		(*i)++;
-	}
-
-	return resp_entry;
-}
-
-static slurm_fd _start_communication()
-{
-	static slurm_addr gold_addr;
-	static int gold_addr_set = 0;
-	char *init_msg = "POST /SSSRMAP3 HTTP/1.1\r\nContent-Type: text/xml; charset=\"utf-8\"\r\nTransfer-Encoding: chunked\r\n\r\n";
-	int rc = 0;
-	slurm_fd gold_fd = 0;
-
-	if(!gold_init) {
-		error("start_gold_communication: "
-		      "need to run setup_gold_info before this");
-		return 0;
-	}
-	
-	if(!gold_addr_set) {
-		slurm_set_addr(&gold_addr, gold_port, gold_host);
-		gold_addr_set = 1;
-	}
-	
-	if ((gold_fd = slurm_open_msg_conn(&gold_addr)) < 0) {
-		error("start_gold_communication to %s: %m", gold_host);
-		return 0;
-	}
-
-	debug3("Connected to %s(%d)", gold_host, gold_port);
-	rc = _slurm_send_timeout(gold_fd, init_msg, strlen(init_msg),
-				 SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
-				 (slurm_get_msg_timeout() * 1000));
-	
-	if (rc < 0) {
-		error("_slurm_send_timeout: %m");
-		return 0;
-	}
-	return gold_fd;
-}
-
-static int _end_communication(slurm_fd gold_fd)
-{
-	int rc = SLURM_SUCCESS;
-	int retry = 0;
-	/* 
-	 *  Attempt to close an open connection
-	 */
-	while ((slurm_shutdown_msg_conn(gold_fd) < 0) && (errno == EINTR)) {
-		if (retry++ > MAX_RETRY) {
-			rc = SLURM_ERROR;
-			break;
-		}
-	}
-	return rc;
-}
-
-extern int init_gold(char *keyfile, char *host, uint16_t port)
-{
-	int fp;
-	char key[256];
-	int i, bytes_read;
-	
-	if(!keyfile || !host) {
-		error("init_gold: Either no keyfile or host given");
-		return SLURM_ERROR;
-	}
-
-	fp = open(keyfile, O_RDONLY);
-	if (fp < 0)
-		fatal("Error opening gold keyfile (%s): %m\n", keyfile);
-	bytes_read = read(fp, key, sizeof(key) - 1);
-	if (bytes_read == -1) {
-		fatal("Error reading hash key from keyfile (%s): %m\n",
-		      keyfile);
-	}
-	key[bytes_read] = '\0'; /* Null terminate the string */
-	for (i = 0; i<bytes_read; i++) /* Remove carriage return if any */
-	{
-		if (key[i] == '\n' || key[i] == '\r') {
-			key[i] = '\0';
-			break;
-		}
-	}
-	
-	/* Close the file */
-	close(fp);
-	//debug4("got the tolken as %s\n", key);
-	gold_key = xstrdup(key);
-	gold_host = xstrdup(host);
-	gold_port = port;
-	gold_init = 1;
-	
-	return SLURM_SUCCESS;
-}
-
-extern int fini_gold()
-{
-	gold_init = 0;
-	xfree(gold_key);
-	xfree(gold_host);
-	
-	return SLURM_SUCCESS;
-}
-
-extern gold_request_t *create_gold_request(gold_object_t object,
-					   gold_object_t action)
-{
-	gold_request_t *gold_request = NULL;
-	
-	gold_request = xmalloc(sizeof(gold_request_t));
-
-	gold_request->object = object;
-	gold_request->action = action;
-	gold_request->assignments = list_create(destroy_gold_name_value);
-	gold_request->conditions = list_create(destroy_gold_name_value);
-	gold_request->selections = list_create(destroy_gold_char);
-	
-	return gold_request;
-}
-
-extern int destroy_gold_request(gold_request_t *gold_request)
-{
-	if(gold_request) {
-		if(gold_request->assignments)
-			list_destroy(gold_request->assignments);
-		if(gold_request->conditions)
-			list_destroy(gold_request->conditions);
-		if(gold_request->selections)
-			list_destroy(gold_request->selections);
-		xfree(gold_request->body);
-		xfree(gold_request->digest);
-		xfree(gold_request->signature);
-		xfree(gold_request);
-	}
-	return SLURM_SUCCESS;
-}
-
-extern int gold_request_add_assignment(gold_request_t *gold_request, 
-				       char *name, char *value)
-{
-	gold_name_value_t *name_val = xmalloc(sizeof(gold_name_value_t));
-	name_val->name = xstrdup(name);
-	name_val->value = xstrdup(value);
-	list_append(gold_request->assignments, name_val);
-		
-	return SLURM_SUCCESS;
-}
-
-extern int gold_request_add_condition(gold_request_t *gold_request, 
-				      char *name, char *value,
-				      gold_operator_t op,
-				      int or_statement)
-{
-	gold_name_value_t *name_val = xmalloc(sizeof(gold_name_value_t));
-	name_val->name = xstrdup(name);
-	name_val->value = xstrdup(value);
-	name_val->op = op;
-	name_val->or_statement = or_statement;
-	list_append(gold_request->conditions, name_val);
-		
-	return SLURM_SUCCESS;
-}
-
-extern int gold_request_add_selection(gold_request_t *gold_request, char *name)
-{
-	list_append(gold_request->selections, xstrdup(name));
-	return SLURM_SUCCESS;
-}
-
-
-extern gold_response_t *get_gold_response(gold_request_t *gold_request)
-{
-	unsigned int slen = EVP_MAX_MD_SIZE;
-	unsigned int dlen = SHA_DIGEST_LENGTH;
-	unsigned char digest[dlen];
-	unsigned char signature[slen];
-	char c, *user_name;
-	char *object = NULL;
-	char *action = NULL;
-	char *innerds = NULL;
-	char *gold_msg = NULL;
-	char tmp_buff[256];
-	char *tmp_char = NULL;
-	uint32_t ret_len = 0;
-	static int timeout = 0;
-	gold_response_t *gold_response = NULL;
-	gold_name_value_t *name_val = NULL;
-	ListIterator itr = NULL;
-	int rc = 0, i = 0;
-	slurm_fd gold_fd = 0;
-
-	if(!gold_init) {
-		error("get_gold_response: "
-		      "need to run setup_gold_info before this");
-		return NULL;
-	} else if(!gold_request) {
-		error("get_gold_response: No request given.");
-		return NULL;
-	}
-
-	if(!timeout) 
-		timeout = (slurm_get_msg_timeout() * 1000);
-	
-	if(gold_request->object >= GOLD_OBJECT_COUNT) {
-		error("get_gold_response: "
-		      "unsupported object %d", gold_request->object);
-		return NULL;
-	}
-	object = GOLD_OBJECT_STR[gold_request->object];
-
-	switch(gold_request->action) {
-	case GOLD_ACTION_QUERY:
-		action = GOLD_ACTION_QUERY_STR;
-		itr = list_iterator_create(gold_request->selections);
-		while((tmp_char = list_next(itr))) {
-			xstrfmtcat(innerds, "<Get name=\"%s\"></Get>",
-				   tmp_char);
-		}
-		list_iterator_destroy(itr);
-		
-		break;
-	case GOLD_ACTION_CREATE:
-		action = GOLD_ACTION_CREATE_STR;
-		itr = list_iterator_create(gold_request->assignments);
-		while((name_val = list_next(itr))) {
-			xstrfmtcat(innerds, "<Set name=\"%s\">%s</Set>",
-				   name_val->name, name_val->value);
-		}
-		list_iterator_destroy(itr);
-		break;
-	case GOLD_ACTION_MODIFY:
-		action = GOLD_ACTION_MODIFY_STR;
-		itr = list_iterator_create(gold_request->assignments);
-		while((name_val = list_next(itr))) {
-			xstrfmtcat(innerds, "<Set name=\"%s\">%s</Set>",
-				   name_val->name, name_val->value);
-		}
-		list_iterator_destroy(itr);
-		break;
-	case GOLD_ACTION_DELETE:
-		action = GOLD_ACTION_DELETE_STR;
-		break;
-	default:
-		error("get_gold_response: "
-		      "unsupported action %d", gold_request->action);
-	}
-
-	itr = list_iterator_create(gold_request->conditions);
-	while((name_val = list_next(itr))) {
-		xstrfmtcat(innerds, "<Where name=\"%s\"", name_val->name);
-
-		if(name_val->op != GOLD_OPERATOR_NONE) {
-			char *op = NULL;
-			switch (name_val->op) {
-			case GOLD_OPERATOR_G :
-				op = "G";
-				break;
-			case GOLD_OPERATOR_GE :
-				op = "GE";
-				break;
-			case GOLD_OPERATOR_L :
-				op = "L";
-				break;
-			case GOLD_OPERATOR_LE :
-				op = "LE";
-				break;
-			default:
-				error("Unknown operator '%d' "
-				      "given to this condition %s = %s",
-				      name_val->op, name_val->name,
-				      name_val->value);
-				xfree(innerds);
-				list_iterator_destroy(itr);
-				return NULL;
-			}
-			
-			xstrfmtcat(innerds, " op=\"%s\"", op);
-		} 
-
-		if(name_val->or_statement == 1) 
-			xstrfmtcat(innerds, " conj=\"Or\" groups=\"-1\"");
-		else if (name_val->or_statement == 2)
-			xstrfmtcat(innerds, " conj=\"And\" groups=\"+1\"");
-
-		xstrfmtcat(innerds, ">%s</Where>", name_val->value);
-	}
-	list_iterator_destroy(itr);
-
-	user_name = uid_to_string(geteuid());
-	xstrfmtcat(gold_request->body,
-		   "<Body><Request action=\"%s\" actor=\"%s\">"
-		   "<Object>%s</Object>",
-		   action, user_name, object);
-	xfree(user_name);
-	if(innerds) {
-		xstrcat(gold_request->body, innerds);
-		xfree(innerds);
-	}
-	xstrcat(gold_request->body, "</Request></Body>");
-
-	SHA1((unsigned char *)gold_request->body, strlen(gold_request->body),
-	     digest);	
-	gold_request->digest = encode_base64(digest, dlen);
-	HMAC(EVP_sha1(), gold_key, strlen(gold_key),
-	     digest, dlen, signature, &slen);
-	gold_request->signature = encode_base64(signature, slen);
-
-	xstrfmtcat(gold_msg,
-		   "<?xml version='1.0' encoding='UTF-8'?><Envelope>%s"
-		   "<Signature><DigestValue>%s</DigestValue>"
-		   "<SignatureValue>%s</SignatureValue>"
-		   "<SecurityToken type='Symmetric'></SecurityToken>"
-		   "</Signature></Envelope>",
-		   gold_request->body, gold_request->digest,
-		   gold_request->signature);
-	
-	snprintf(tmp_buff, sizeof(tmp_buff), "%X\r\n",
-		 (unsigned int)strlen(gold_msg));	
-
-	/* I wish gold could do persistant connections but it only
-	 * does one and then ends it so we have to do that also so
-	 * every time we start a connection we have to finish it. As
-	 * since we can only send one thing at a time we have to lock
-	 * the connection.
-	 */
-//	slurm_mutex_lock(&gold_mutex);
-	if(!(gold_fd = _start_communication())) {
-		//slurm_mutex_unlock(&gold_mutex);
-		return NULL;
-	}
-	rc = _slurm_send_timeout(gold_fd, tmp_buff, strlen(tmp_buff),
-				 SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
-				 timeout);
-	
-	if (rc < 0) {
-		error("get_gold_response 1: _slurm_send_timeout: %m");
-		goto error;
-	}
-
-	debug3("sending %d '%s'", rc, gold_msg);
-
-	xstrcat(gold_msg, "0\r\n");
-	rc = _slurm_send_timeout(gold_fd, gold_msg, strlen(gold_msg),
-				 SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
-				 timeout);
-	if (rc < 0) {
-		error("get_gold_response 2: _slurm_send_timeout: %m");
-		goto error;
-	}
-	
-	xfree(gold_msg);
-	
-	/* we will always get this header 
-	 * HTTP/1.1 200 OK 17
-	 * Content-Type: text/xml; charset="utf-8" 42
-	 * Transfer-Encoding: chunked 28
-	 *  
-	 * which translates to 87 chars
-	 */ 
-	if(_slurm_recv_timeout(gold_fd, tmp_buff, 87, 0, timeout) < 0) {
-		error("get_gold_response: "
-		      "couldn't get the header of the message");
-		goto error;
-	}
-	debug5("got the header '%s'", tmp_buff);
-	
-	/* then get the size which is ended with '\r\n'
-	 */
-	i = 0;
-	while(read(gold_fd, &c, 1) > 0) {
-		if(c == '\r') {
-			read(gold_fd, &c, 1);
-			break;
-		}
-		tmp_buff[i++] = c; //////// getting command string
-		
-	}
-	tmp_buff[i] = '\0';
-	ret_len = xstrntol(tmp_buff, NULL, i, 16);
-		
-	debug4("got size %d", ret_len);
-	
-	gold_msg = xmalloc(ret_len+1);
-	
-	if(_slurm_recv_timeout(gold_fd, gold_msg, ret_len, 0, timeout) < 0) {
-		error("get_gold_response: "
-		      "couldn't get the message");
-		goto error;
-	}
-
-	debug3("got back '%s'", gold_msg);
-	if(_slurm_recv_timeout(gold_fd, tmp_buff, 3, 0, timeout) < 0) {
-		error("get_gold_response: "
-		      "couldn't get the end of the message");
-		goto error;
-	}
-	
-	gold_response = xmalloc(sizeof(gold_response_t));
-	gold_response->entries = list_create(destroy_gold_response_entry);
-	i = 0;
-	while(gold_msg[i]) {
-		if(!strncmp(gold_msg+i, "<Code>", 6)) {
-			i+=6;
-			gold_response->rc = atoi(gold_msg+i);
-		} else if(!strncmp(gold_msg+i, "<Count>", 7)) {
-			i+=7;
-			gold_response->entry_cnt = atoi(gold_msg+i);
-		} else if(!strncmp(gold_msg+i, "<Message>", 9)) {
-			int msg_end = 0;
-
-			i+=9;
-			msg_end = i;
-			while(gold_msg[msg_end] != '<') 
-				msg_end++;
-			
-			gold_response->message = 
-				xstrndup(gold_msg+i, msg_end-i);
-			i = msg_end + 10;
-		} else if(!strncmp(gold_msg+i, object, strlen(object))) {
-			gold_response_entry_t *resp_entry =
-				_create_response_entry(object, gold_msg, &i);
-			list_append(gold_response->entries, resp_entry);
-		}
-		i++;	
-	}
-	xfree(gold_msg);
-
-error:
-	/* I wish gold could do persistant connections but it only
-	 * does one and then ends it so we have to do that also so
-	 * every time we start a connection we have to finish it.
-	 */
-	_end_communication(gold_fd);
-//	slurm_mutex_unlock(&gold_mutex);
-		
-	return gold_response;
-
-}
-
-extern int destroy_gold_response(gold_response_t *gold_response)
-{
-	if(gold_response) {
-		xfree(gold_response->message);
-		if(gold_response->entries) 
-			list_destroy(gold_response->entries);
-		
-		xfree(gold_response);
-	}
-	return SLURM_SUCCESS;
-}
-
-extern void destroy_gold_name_value(void *object)
-{
-	gold_name_value_t *name_val = (gold_name_value_t *)object;
-
-	if(name_val) {
-		xfree(name_val->name);
-		xfree(name_val->value);
-		xfree(name_val);
-	}
-}
-
-extern void destroy_gold_char(void *object)
-{
-	char *name_val = (char *)object;
-	xfree(name_val);
-}
-
-extern void destroy_gold_response_entry(void *object)
-{
-	gold_response_entry_t *resp_entry = (gold_response_entry_t *)object;
-
-	if(resp_entry) {
-		list_destroy(resp_entry->name_val);
-		xfree(resp_entry);
-	}
-}
-
diff --git a/src/database/gold_interface.h b/src/database/gold_interface.h
deleted file mode 100644
index 0ed77a11b..000000000
--- a/src/database/gold_interface.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/*****************************************************************************\
- *  gold_interface.h - interface to the gold daemon commands.
- *
- *  $Id: storage_filetxt.c 10893 2007-01-29 21:53:48Z da $
- *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-#ifndef _HAVE_GOLD_INTERFACE_H
-#define _HAVE_GOLD_INTERFACE_H
-
-
-#ifdef HAVE_CONFIG_H
-#  include "config.h"
-#endif
-
-#if HAVE_STDINT_H
-#  include <stdint.h>
-#endif
-#if HAVE_INTTYPES_H
-#  include <inttypes.h>
-#endif
-
-#include <netdb.h>
-
-#include <stdio.h>
-#include <slurm/slurm_errno.h>
-
-#include "src/common/xmalloc.h"
-#include "src/common/list.h"
-#include "src/common/xstring.h"
-
-#define	GOLD_ACTION_QUERY_STR "Query"
-#define GOLD_ACTION_CREATE_STR "Create"
-#define GOLD_ACTION_MODIFY_STR "Modify"
-#define GOLD_ACTION_DELETE_STR "Delete"
-
-#define GOLD_OBJECT_ACCT_STR "Account"
-#define GOLD_OBJECT_USER_STR "User"
-#define GOLD_OBJECT_PROJECT_STR "Project"
-#define GOLD_OBJECT_MACHINE_STR "Machine"
-#define GOLD_OBJECT_JOB_STR "Job"
-#define GOLD_OBJECT_ROLEUSER_STR "RoleUser"
-#define GOLD_OBJECT_EVENT_STR "EventLog"
-#define GOLD_OBJECT_MACHINE_HOUR_STR "MachineHourUsage"
-#define GOLD_OBJECT_MACHINE_DAY_STR "MachineDayUsage"
-#define GOLD_OBJECT_MACHINE_MONTH_STR "MachineMonthUsage"
-#define GOLD_OBJECT_ACCT_HOUR_STR "AccountHourUsage"
-#define GOLD_OBJECT_ACCT_DAY_STR "AccountDayUsage"
-#define GOLD_OBJECT_ACCT_MONTH_STR "AccountMonthUsage"
-
-typedef enum {
-	GOLD_ACTION_QUERY,
-	GOLD_ACTION_CREATE,
-	GOLD_ACTION_MODIFY,
-	GOLD_ACTION_DELETE,
-	GOLD_ACTION_COUNT
-} gold_action_t;
-
-/* When changing this you would also make GOLD_OBJECT_STR match
- * defined in gold_interface.c */
-typedef enum {
-	GOLD_OBJECT_ACCT,
-	GOLD_OBJECT_USER,
-	GOLD_OBJECT_PROJECT,
-	GOLD_OBJECT_MACHINE,
-	GOLD_OBJECT_JOB,
-	GOLD_OBJECT_ROLEUSER,
-	GOLD_OBJECT_EVENT,
-	GOLD_OBJECT_MACHINE_HOUR_USAGE,
-	GOLD_OBJECT_MACHINE_DAY_USAGE,
-	GOLD_OBJECT_MACHINE_MONTH_USAGE,
-	GOLD_OBJECT_ACCT_HOUR_USAGE,
-	GOLD_OBJECT_ACCT_DAY_USAGE,
-	GOLD_OBJECT_ACCT_MONTH_USAGE,
-	GOLD_OBJECT_COUNT
-} gold_object_t;
-
-typedef enum {
-	GOLD_OPERATOR_NONE,
-	GOLD_OPERATOR_G,
-	GOLD_OPERATOR_GE,
-	GOLD_OPERATOR_L,
-	GOLD_OPERATOR_LE,
-	GOLD_OPERATOR_COUNT
-} gold_operator_t;
-
-typedef struct {
-	char *name;
-	char *value;
-	gold_operator_t op;
-	int or_statement; // 0 for nothing 1 for or last 2 for or next
-} gold_name_value_t;
-
-typedef struct {
-	gold_object_t object;
-	gold_action_t action;
-	List assignments; // List of gold_name_value_t's
-	List conditions; // List of gold_name_value_t's
-	List selections; // List of char *'s
-	char *body;
-	unsigned char *digest;
-	unsigned char *signature;
-} gold_request_t;
-
-typedef struct {
-	List name_val; // List of gold_name_value_t's
-} gold_response_entry_t;
-
-typedef struct {
-	List entries; // List of gold_response_entry_t's
-	int entry_cnt;
-	char *message;
-	int rc;
-} gold_response_t;
-
-extern char *GOLD_OBJECT_STR[];
-
-extern int init_gold(char *keyfile, char *host, uint16_t port);
-extern int fini_gold();
-
-extern gold_request_t *create_gold_request(gold_object_t object,
-					   gold_object_t action);
-extern int destroy_gold_request(gold_request_t *gold_request);
-
-extern int gold_request_add_assignment(gold_request_t *gold_request, 
-				       char *name, char *value);
-extern int gold_request_add_condition(gold_request_t *gold_request, 
-				      char *name, char *value,
-				      gold_operator_t op,
-				      int or_statement);
-extern int gold_request_add_selection(gold_request_t *gold_request, char *name);
-
-extern gold_response_t *get_gold_response(gold_request_t *gold_request);
-extern int destroy_gold_response(gold_response_t *gold_response);
-
-extern void destroy_gold_name_value(void *object);
-extern void destroy_gold_char(void *object);
-extern void destroy_gold_response_entry(void *object);
-
-#endif
diff --git a/src/database/mysql_common.c b/src/database/mysql_common.c
index 35847b0a2..96107e7a5 100644
--- a/src/database/mysql_common.c
+++ b/src/database/mysql_common.c
@@ -43,7 +43,9 @@
 #include "src/common/timers.h"
 #include "src/common/slurm_protocol_api.h"
 
+#ifdef MYSQL_NOT_THREAD_SAFE
 pthread_mutex_t mysql_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
 
 #ifdef HAVE_MYSQL
 
@@ -184,7 +186,9 @@ static int _create_db(char *db_name, mysql_db_info_t *db_info)
 	char create_line[50];
 	MYSQL *mysql_db = NULL;
 
-//	slurm_mutex_lock(&mysql_lock);
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_lock(&mysql_lock);
+#endif
 	if(!(mysql_db = mysql_init(mysql_db)))
 		fatal("mysql_init failed: %s", mysql_error(mysql_db));
 	
@@ -203,12 +207,16 @@ static int _create_db(char *db_name, mysql_db_info_t *db_info)
 		     "user = %s pass = %s port = %u",
 		     db_info->host, db_info->user,
 		     db_info->pass, db_info->port);
+#ifdef MYSQL_NOT_THREAD_SAFE
 		slurm_mutex_unlock(&mysql_lock);
+#endif
 		fatal("mysql_real_connect failed: %d %s\n",
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db));
 	}
-//	slurm_mutex_unlock(&mysql_lock);
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_unlock(&mysql_lock);
+#endif
 	return SLURM_SUCCESS;
 }
 
@@ -292,8 +300,9 @@ extern int mysql_db_query(MYSQL *mysql_db, char *query)
 {
 	if(!mysql_db)
 		fatal("You haven't inited this storage yet.");
+#ifdef MYSQL_NOT_THREAD_SAFE
 	slurm_mutex_lock(&mysql_lock);
-
+#endif
 	/* clear out the old results so we don't get a 2014 error */
 	_clear_results(mysql_db);		
 //try_again:
@@ -306,11 +315,15 @@ extern int mysql_db_query(MYSQL *mysql_db, char *query)
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db), query);
 		errno = mysql_errno(mysql_db);
+#ifdef MYSQL_NOT_THREAD_SAFE
 		slurm_mutex_unlock(&mysql_lock);
+#endif
 		return SLURM_ERROR;
 	}
-	slurm_mutex_unlock(&mysql_lock);
 
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_unlock(&mysql_lock);
+#endif
 	return SLURM_SUCCESS;
 }
 
@@ -323,8 +336,9 @@ extern int mysql_db_ping(MYSQL *mysql_db)
 
 extern int mysql_db_commit(MYSQL *mysql_db)
 {
-	//slurm_mutex_lock(&mysql_lock);
-
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_lock(&mysql_lock);
+#endif
 	/* clear out the old results so we don't get a 2014 error */
 	_clear_results(mysql_db);		
 	if(mysql_commit(mysql_db)) {
@@ -332,18 +346,22 @@ extern int mysql_db_commit(MYSQL *mysql_db)
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db));
 		errno = mysql_errno(mysql_db);
-		//slurm_mutex_unlock(&mysql_lock);
+#ifdef MYSQL_NOT_THREAD_SAFE
+		slurm_mutex_unlock(&mysql_lock);
+#endif
 		return SLURM_ERROR;
 	}
-	//slurm_mutex_unlock(&mysql_lock);
-
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_unlock(&mysql_lock);
+#endif
 	return SLURM_SUCCESS;
 }
 
 extern int mysql_db_rollback(MYSQL *mysql_db)
 {
-	//slurm_mutex_lock(&mysql_lock);
-
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_lock(&mysql_lock);
+#endif
 	/* clear out the old results so we don't get a 2014 error */
 	_clear_results(mysql_db);		
 	if(mysql_rollback(mysql_db)) {
@@ -351,12 +369,15 @@ extern int mysql_db_rollback(MYSQL *mysql_db)
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db));
 		errno = mysql_errno(mysql_db);
-		//slurm_mutex_unlock(&mysql_lock);
+#ifdef MYSQL_NOT_THREAD_SAFE
+		slurm_mutex_unlock(&mysql_lock);
+#endif
 		return SLURM_ERROR;
 	}
 	//mysql_db_query(mysql_db, "unlock tables;");
-	//slurm_mutex_unlock(&mysql_lock);
-
+#ifdef MYSQL_NOT_THREAD_SAFE
+	slurm_mutex_unlock(&mysql_lock);
+#endif
 	return SLURM_SUCCESS;
 
 }
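
The mysql_common.c hunks above compile the global mysql_lock, and every lock/unlock around it, only when MYSQL_NOT_THREAD_SAFE is defined; note that each early "return SLURM_ERROR" path now unlocks first. A minimal self-contained sketch of that pattern, for illustration only (the DB_LOCK/DB_UNLOCK wrappers and run_query stub are hypothetical, not part of SLURM):

#include <pthread.h>

#ifdef MYSQL_NOT_THREAD_SAFE
static pthread_mutex_t mysql_lock = PTHREAD_MUTEX_INITIALIZER;
# define DB_LOCK()   pthread_mutex_lock(&mysql_lock)
# define DB_UNLOCK() pthread_mutex_unlock(&mysql_lock)
#else
# define DB_LOCK()   ((void) 0)  /* thread-safe client: no serialization needed */
# define DB_UNLOCK() ((void) 0)
#endif

/* Stand-in for the real query call; it only reports whether a
 * connection and query string were supplied at all. */
static int run_query(void *conn, const char *query)
{
	return (conn && query) ? 0 : -1;
}

static int db_query(void *conn, const char *query)
{
	DB_LOCK();
	if (run_query(conn, query) != 0) {
		DB_UNLOCK();        /* unlock on every early-return path */
		return -1;
	}
	DB_UNLOCK();
	return 0;
}
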
diff --git a/src/plugins/accounting_storage/Makefile.am b/src/plugins/accounting_storage/Makefile.am
index c4879b80a..4c228e189 100644
--- a/src/plugins/accounting_storage/Makefile.am
+++ b/src/plugins/accounting_storage/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for storage plugins
 
-SUBDIRS = filetxt gold mysql pgsql none slurmdbd
+SUBDIRS = filetxt mysql pgsql none slurmdbd
diff --git a/src/plugins/accounting_storage/Makefile.in b/src/plugins/accounting_storage/Makefile.in
index 251b0d160..c8a98efcc 100644
--- a/src/plugins/accounting_storage/Makefile.in
+++ b/src/plugins/accounting_storage/Makefile.in
@@ -247,7 +247,7 @@ target_os = @target_os@
 target_vendor = @target_vendor@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = filetxt gold mysql pgsql none slurmdbd
+SUBDIRS = filetxt mysql pgsql none slurmdbd
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
index a37ad84ee..fd8ed1ba2 100644
--- a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
+++ b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
@@ -240,7 +240,8 @@ extern int fini ( void )
 	return SLURM_SUCCESS;
 }
 
-extern void * acct_storage_p_get_connection(bool make_agent, bool rollback)
+extern void * acct_storage_p_get_connection(bool make_agent, int conn_num,
+					    bool rollback)
 {
 	return NULL;
 }
@@ -319,6 +320,13 @@ extern List acct_storage_p_modify_associations(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
+extern List acct_storage_p_modify_qos(void *db_conn, uint32_t uid,
+				      acct_qos_cond_t *qos_cond,
+				      acct_qos_rec_t *qos)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 				       acct_user_cond_t *user_q)
 {
@@ -449,7 +457,7 @@ extern int clusteracct_storage_p_get_usage(
 /* 
  * load into the storage the start of a job
  */
-extern int jobacct_storage_p_job_start(void *db_conn,
+extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 				       struct job_record *job_ptr)
 {
 	int	i,
@@ -668,6 +676,7 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 	float ave_vsize = 0, ave_rss = 0, ave_pages = 0;
 	float ave_cpu = 0, ave_cpu2 = 0;
 	char *account;
+	uint32_t exit_code;
 
 	if(!storage_init) {
 		debug("jobacct init was not called or it failed");
@@ -684,7 +693,12 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 	
 	if ((elapsed=now-step_ptr->start_time)<0)
 		elapsed=0;	/* For *very* short jobs, if clock is wrong */
-	if (step_ptr->exit_code)
+
+	exit_code = step_ptr->exit_code;
+	if (exit_code == NO_VAL) {
+		comp_status = JOB_CANCELLED;
+		exit_code = 0;
+	} else if (exit_code)
 		comp_status = JOB_FAILED;
 	else
 		comp_status = JOB_COMPLETE;
@@ -740,7 +754,7 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 		 JOB_STEP,
 		 step_ptr->step_id,	/* stepid */
 		 comp_status,		/* completion status */
-		 step_ptr->exit_code,	/* completion code */
+		 exit_code,	/* completion code */
 		 cpus,          	/* number of tasks */
 		 cpus,                  /* number of cpus */
 		 elapsed,	        /* elapsed seconds */
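
For the step-completion change above: an exit code of NO_VAL means the step's code was never set (the step was cancelled), so it is recorded as JOB_CANCELLED with a neutral exit code of 0. A small sketch of that rule; NO_VAL and the JOB_* constants below are placeholders standing in for the definitions in slurm.h, and the helper itself is hypothetical:

#include <stdint.h>

#define NO_VAL (0xfffffffe)                 /* assumed: "value never set" marker */
enum { JOB_COMPLETE = 3, JOB_CANCELLED = 4, JOB_FAILED = 5 };

static int step_completion_status(uint32_t code_in, uint32_t *code_out)
{
	if (code_in == NO_VAL) {    /* exit code never set: step was cancelled */
		*code_out = 0;      /* log a neutral completion code */
		return JOB_CANCELLED;
	}
	*code_out = code_in;
	return code_in ? JOB_FAILED : JOB_COMPLETE;
}
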
diff --git a/src/plugins/accounting_storage/gold/Makefile.am b/src/plugins/accounting_storage/gold/Makefile.am
deleted file mode 100644
index cd5495140..000000000
--- a/src/plugins/accounting_storage/gold/Makefile.am
+++ /dev/null
@@ -1,31 +0,0 @@
-# Makefile for accounting_storage/gold plugin
-
-AUTOMAKE_OPTIONS = foreign
-
-if HAVE_OPENSSL
-gold_lib = accounting_storage_gold.la
-else
-gold_lib =
-endif
-
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
-
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-
-pkglib_LTLIBRARIES = $(gold_lib)
-if HAVE_OPENSSL
-accounting_storage_gold_la_SOURCES = accounting_storage_gold.c
-accounting_storage_gold_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-
-# Add libcommon to provide some symbols that are not
-#  available in slurmctld (create_jobacct_job_rec)
-
-accounting_storage_gold_la_LIBADD  = \
-	$(top_builddir)/src/database/libslurm_gold.la	
-accounting_storage_gold_la_DEPENDENCIES = \
-	$(top_builddir)/src/database/libslurm_gold.la
-
-else
-EXTRA_accounting_storage_gold_la_SOURCES = accounting_storage_gold.c
-endif
-
diff --git a/src/plugins/accounting_storage/gold/Makefile.in b/src/plugins/accounting_storage/gold/Makefile.in
deleted file mode 100644
index c70269ee4..000000000
--- a/src/plugins/accounting_storage/gold/Makefile.in
+++ /dev/null
@@ -1,575 +0,0 @@
-# Makefile.in generated by automake 1.10.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# Makefile for accounting_storage/gold plugin
-
-VPATH = @srcdir@
-pkgdatadir = $(datadir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-target_triplet = @target@
-subdir = src/plugins/accounting_storage/gold
-DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
-	$(top_srcdir)/auxdir/slurm.m4 \
-	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
-	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
-	$(top_srcdir)/auxdir/x_ac_aix.m4 \
-	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
-	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
-	$(top_srcdir)/auxdir/x_ac_databases.m4 \
-	$(top_srcdir)/auxdir/x_ac_debug.m4 \
-	$(top_srcdir)/auxdir/x_ac_elan.m4 \
-	$(top_srcdir)/auxdir/x_ac_federation.m4 \
-	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
-	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
-	$(top_srcdir)/auxdir/x_ac_munge.m4 \
-	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
-	$(top_srcdir)/auxdir/x_ac_pam.m4 \
-	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
-	$(top_srcdir)/auxdir/x_ac_readline.m4 \
-	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
-	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
-	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
-	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
-	$(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
-CONFIG_CLEAN_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
-    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
-    *) f=$$p;; \
-  esac;
-am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
-am__installdirs = "$(DESTDIR)$(pkglibdir)"
-pkglibLTLIBRARIES_INSTALL = $(INSTALL)
-LTLIBRARIES = $(pkglib_LTLIBRARIES)
-am__accounting_storage_gold_la_SOURCES_DIST =  \
-	accounting_storage_gold.c
-@HAVE_OPENSSL_TRUE@am_accounting_storage_gold_la_OBJECTS =  \
-@HAVE_OPENSSL_TRUE@	accounting_storage_gold.lo
-am__EXTRA_accounting_storage_gold_la_SOURCES_DIST =  \
-	accounting_storage_gold.c
-accounting_storage_gold_la_OBJECTS =  \
-	$(am_accounting_storage_gold_la_OBJECTS)
-accounting_storage_gold_la_LINK = $(LIBTOOL) --tag=CC \
-	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
-	$(AM_CFLAGS) $(CFLAGS) $(accounting_storage_gold_la_LDFLAGS) \
-	$(LDFLAGS) -o $@
-@HAVE_OPENSSL_TRUE@am_accounting_storage_gold_la_rpath = -rpath \
-@HAVE_OPENSSL_TRUE@	$(pkglibdir)
-DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
-depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
-am__depfiles_maybe = depfiles
-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
-	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
-LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
-	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
-	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
-CCLD = $(CC)
-LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
-	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
-	$(LDFLAGS) -o $@
-SOURCES = $(accounting_storage_gold_la_SOURCES) \
-	$(EXTRA_accounting_storage_gold_la_SOURCES)
-DIST_SOURCES = $(am__accounting_storage_gold_la_SOURCES_DIST) \
-	$(am__EXTRA_accounting_storage_gold_la_SOURCES_DIST)
-ETAGS = etags
-CTAGS = ctags
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AR = @AR@
-AUTHD_CFLAGS = @AUTHD_CFLAGS@
-AUTHD_LIBS = @AUTHD_LIBS@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-BG_INCLUDES = @BG_INCLUDES@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMD_LDFLAGS = @CMD_LDFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CXX = @CXX@
-CXXCPP = @CXXCPP@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-DSYMUTIL = @DSYMUTIL@
-ECHO = @ECHO@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ELAN_LIBS = @ELAN_LIBS@
-EXEEXT = @EXEEXT@
-F77 = @F77@
-FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
-FFLAGS = @FFLAGS@
-GREP = @GREP@
-GTK2_CFLAGS = @GTK2_CFLAGS@
-GTK2_LIBS = @GTK2_LIBS@
-HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
-HAVEPGCONFIG = @HAVEPGCONFIG@
-HAVEPKGCONFIG = @HAVEPKGCONFIG@
-HAVE_AIX = @HAVE_AIX@
-HAVE_ELAN = @HAVE_ELAN@
-HAVE_FEDERATION = @HAVE_FEDERATION@
-HAVE_OPENSSL = @HAVE_OPENSSL@
-HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-LDFLAGS = @LDFLAGS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBTOOL = @LIBTOOL@
-LIB_LDFLAGS = @LIB_LDFLAGS@
-LN_S = @LN_S@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MKDIR_P = @MKDIR_P@
-MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
-MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
-MUNGE_LIBS = @MUNGE_LIBS@
-MYSQL_CFLAGS = @MYSQL_CFLAGS@
-MYSQL_LIBS = @MYSQL_LIBS@
-NCURSES = @NCURSES@
-NMEDIT = @NMEDIT@
-NUMA_LIBS = @NUMA_LIBS@
-OBJEXT = @OBJEXT@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PAM_LIBS = @PAM_LIBS@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PGSQL_CFLAGS = @PGSQL_CFLAGS@
-PGSQL_LIBS = @PGSQL_LIBS@
-PLPA_LIBS = @PLPA_LIBS@
-PROCTRACKDIR = @PROCTRACKDIR@
-PROJECT = @PROJECT@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-RANLIB = @RANLIB@
-READLINE_LIBS = @READLINE_LIBS@
-RELEASE = @RELEASE@
-SED = @SED@
-SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
-SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SLURMCTLD_PORT = @SLURMCTLD_PORT@
-SLURMDBD_PORT = @SLURMDBD_PORT@
-SLURMD_PORT = @SLURMD_PORT@
-SLURM_API_AGE = @SLURM_API_AGE@
-SLURM_API_CURRENT = @SLURM_API_CURRENT@
-SLURM_API_MAJOR = @SLURM_API_MAJOR@
-SLURM_API_REVISION = @SLURM_API_REVISION@
-SLURM_API_VERSION = @SLURM_API_VERSION@
-SLURM_MAJOR = @SLURM_MAJOR@
-SLURM_MICRO = @SLURM_MICRO@
-SLURM_MINOR = @SLURM_MINOR@
-SLURM_VERSION = @SLURM_VERSION@
-SO_LDFLAGS = @SO_LDFLAGS@
-SSL_CPPFLAGS = @SSL_CPPFLAGS@
-SSL_LDFLAGS = @SSL_LDFLAGS@
-SSL_LIBS = @SSL_LIBS@
-STRIP = @STRIP@
-UTIL_LIBS = @UTIL_LIBS@
-VERSION = @VERSION@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-ac_ct_F77 = @ac_ct_F77@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-builddir = @builddir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target = @target@
-target_alias = @target_alias@
-target_cpu = @target_cpu@
-target_os = @target_os@
-target_vendor = @target_vendor@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-AUTOMAKE_OPTIONS = foreign
-@HAVE_OPENSSL_FALSE@gold_lib = 
-@HAVE_OPENSSL_TRUE@gold_lib = accounting_storage_gold.la
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-pkglib_LTLIBRARIES = $(gold_lib)
-@HAVE_OPENSSL_TRUE@accounting_storage_gold_la_SOURCES = accounting_storage_gold.c
-@HAVE_OPENSSL_TRUE@accounting_storage_gold_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-
-# Add libcommon to provide some symbols that are not
-#  available in slurmctld (create_jobacct_job_rec)
-@HAVE_OPENSSL_TRUE@accounting_storage_gold_la_LIBADD = \
-@HAVE_OPENSSL_TRUE@	$(top_builddir)/src/database/libslurm_gold.la	
-
-@HAVE_OPENSSL_TRUE@accounting_storage_gold_la_DEPENDENCIES = \
-@HAVE_OPENSSL_TRUE@	$(top_builddir)/src/database/libslurm_gold.la
-
-@HAVE_OPENSSL_FALSE@EXTRA_accounting_storage_gold_la_SOURCES = accounting_storage_gold.c
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .c .lo .o .obj
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
-	@for dep in $?; do \
-	  case '$(am__configure_deps)' in \
-	    *$$dep*) \
-	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
-		&& exit 0; \
-	      exit 1;; \
-	  esac; \
-	done; \
-	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/plugins/accounting_storage/gold/Makefile'; \
-	cd $(top_srcdir) && \
-	  $(AUTOMAKE) --foreign  src/plugins/accounting_storage/gold/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
-	@case '$?' in \
-	  *config.status*) \
-	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
-	  *) \
-	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
-	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
-	esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
-	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
-	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
-	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
-	@$(NORMAL_INSTALL)
-	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
-	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
-	  if test -f $$p; then \
-	    f=$(am__strip_dir) \
-	    echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \
-	    $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \
-	  else :; fi; \
-	done
-
-uninstall-pkglibLTLIBRARIES:
-	@$(NORMAL_UNINSTALL)
-	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
-	  p=$(am__strip_dir) \
-	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \
-	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \
-	done
-
-clean-pkglibLTLIBRARIES:
-	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
-	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
-	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
-	  test "$$dir" != "$$p" || dir=.; \
-	  echo "rm -f \"$${dir}/so_locations\""; \
-	  rm -f "$${dir}/so_locations"; \
-	done
-accounting_storage_gold.la: $(accounting_storage_gold_la_OBJECTS) $(accounting_storage_gold_la_DEPENDENCIES) 
-	$(accounting_storage_gold_la_LINK) $(am_accounting_storage_gold_la_rpath) $(accounting_storage_gold_la_OBJECTS) $(accounting_storage_gold_la_LIBADD) $(LIBS)
-
-mostlyclean-compile:
-	-rm -f *.$(OBJEXT)
-
-distclean-compile:
-	-rm -f *.tab.c
-
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_gold.Plo@am__quote@
-
-.c.o:
-@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(COMPILE) -c $<
-
-.c.obj:
-@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
-@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
-
-.c.lo:
-@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
-
-mostlyclean-libtool:
-	-rm -f *.lo
-
-clean-libtool:
-	-rm -rf .libs _libs
-
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
-	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
-	unique=`for i in $$list; do \
-	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-	  done | \
-	  $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \
-	      END { if (nonempty) { for (i in files) print i; }; }'`; \
-	mkid -fID $$unique
-tags: TAGS
-
-TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
-		$(TAGS_FILES) $(LISP)
-	tags=; \
-	here=`pwd`; \
-	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
-	unique=`for i in $$list; do \
-	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-	  done | \
-	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
-	      END { if (nonempty) { for (i in files) print i; }; }'`; \
-	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
-	  test -n "$$unique" || unique=$$empty_fix; \
-	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
-	    $$tags $$unique; \
-	fi
-ctags: CTAGS
-CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
-		$(TAGS_FILES) $(LISP)
-	tags=; \
-	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
-	unique=`for i in $$list; do \
-	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-	  done | \
-	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
-	      END { if (nonempty) { for (i in files) print i; }; }'`; \
-	test -z "$(CTAGS_ARGS)$$tags$$unique" \
-	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
-	     $$tags $$unique
-
-GTAGS:
-	here=`$(am__cd) $(top_builddir) && pwd` \
-	  && cd $(top_srcdir) \
-	  && gtags -i $(GTAGS_ARGS) $$here
-
-distclean-tags:
-	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
-	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
-	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
-	list='$(DISTFILES)'; \
-	  dist_files=`for file in $$list; do echo $$file; done | \
-	  sed -e "s|^$$srcdirstrip/||;t" \
-	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
-	case $$dist_files in \
-	  */*) $(MKDIR_P) `echo "$$dist_files" | \
-			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
-			   sort -u` ;; \
-	esac; \
-	for file in $$dist_files; do \
-	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
-	  if test -d $$d/$$file; then \
-	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
-	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
-	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
-	    fi; \
-	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
-	  else \
-	    test -f $(distdir)/$$file \
-	    || cp -p $$d/$$file $(distdir)/$$file \
-	    || exit 1; \
-	  fi; \
-	done
-check-am: all-am
-check: check-am
-all-am: Makefile $(LTLIBRARIES)
-installdirs:
-	for dir in "$(DESTDIR)$(pkglibdir)"; do \
-	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
-	done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
-	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
-	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
-	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
-	  `test -z '$(STRIP)' || \
-	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
-	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-
-maintainer-clean-generic:
-	@echo "This command is intended for maintainers to use"
-	@echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
-
-clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
-	mostlyclean-am
-
-distclean: distclean-am
-	-rm -rf ./$(DEPDIR)
-	-rm -f Makefile
-distclean-am: clean-am distclean-compile distclean-generic \
-	distclean-tags
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-exec-am: install-pkglibLTLIBRARIES
-
-install-html: install-html-am
-
-install-info: install-info-am
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-ps: install-ps-am
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
-	-rm -rf ./$(DEPDIR)
-	-rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-compile mostlyclean-generic \
-	mostlyclean-libtool
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-pkglibLTLIBRARIES
-
-.MAKE: install-am install-strip
-
-.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
-	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
-	distclean-compile distclean-generic distclean-libtool \
-	distclean-tags distdir dvi dvi-am html html-am info info-am \
-	install install-am install-data install-data-am install-dvi \
-	install-dvi-am install-exec install-exec-am install-html \
-	install-html-am install-info install-info-am install-man \
-	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
-	install-ps install-ps-am install-strip installcheck \
-	installcheck-am installdirs maintainer-clean \
-	maintainer-clean-generic mostlyclean mostlyclean-compile \
-	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
-	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/src/plugins/accounting_storage/gold/accounting_storage_gold.c b/src/plugins/accounting_storage/gold/accounting_storage_gold.c
deleted file mode 100644
index d7349ecc4..000000000
--- a/src/plugins/accounting_storage/gold/accounting_storage_gold.c
+++ /dev/null
@@ -1,3246 +0,0 @@
-/*****************************************************************************\
- *  accounting_storage_gold.c - accounting interface to gold.
- *
- *  $Id: accounting_gold.c 13061 2008-01-22 21:23:56Z da $
- *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include <stdlib.h>
-#include <ctype.h>
-#include <sys/stat.h>
-#include <pwd.h>
-
-
-#include "src/common/xmalloc.h"
-#include "src/common/list.h"
-#include "src/common/xstring.h"
-#include "src/common/uid.h"
-#include <src/common/parse_time.h>
-
-#include "src/slurmctld/slurmctld.h"
-#include "src/slurmd/slurmd/slurmd.h"
-#include "src/slurmdbd/read_config.h"
-#include "src/common/slurm_protocol_api.h"
-#include "src/common/slurm_accounting_storage.h"
-#include "src/common/jobacct_common.h"
-
-#include "src/database/gold_interface.h"
-
-/*
- * These variables are required by the generic plugin interface.  If they
- * are not found in the plugin, the plugin loader will ignore it.
- *
- * plugin_name - a string giving a human-readable description of the
- * plugin.  There is no maximum length, but the symbol must refer to
- * a valid string.
- *
- * plugin_type - a string suggesting the type of the plugin or its
- * applicability to a particular form of data or method of data handling.
- * If the low-level plugin API is used, the contents of this string are
- * unimportant and may be anything.  SLURM uses the higher-level plugin
- * interface which requires this string to be of the form
- *
- *	<application>/<method>
- *
- * where <application> is a description of the intended application of
- * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method>
- * is a description of how this plugin satisfies that application.  SLURM will
- * only load job completion logging plugins if the plugin_type string has a 
- * prefix of "jobacct/".
- *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum versions for their plugins as the job accounting API 
- * matures.
- */
-const char plugin_name[] = "Accounting storage GOLD plugin";
-const char plugin_type[] = "accounting_storage/gold";
-const uint32_t plugin_version = 100;
-
-static List local_association_list = NULL;
-
-static int _add_edit_job(struct job_record *job_ptr, gold_object_t action);
-static int _check_for_job(uint32_t jobid, time_t submit);
-static List _get_association_list_from_response(gold_response_t *gold_response);
-/* static int _get_cluster_accounting_list_from_response( */
-/* 	gold_response_t *gold_response,  */
-/* 	acct_cluster_rec_t *cluster_rec); */
-/* static int _get_acct_accounting_list_from_response( */
-/* 	gold_response_t *gold_response, */
-/* 	acct_association_rec_t *account_rec); */
-static List _get_user_list_from_response(gold_response_t *gold_response);
-static List _get_acct_list_from_response(gold_response_t *gold_response);
-static List _get_cluster_list_from_response(gold_response_t *gold_response);
-static int _remove_association_accounting(List id_list);
-
-
-static int _add_edit_job(struct job_record *job_ptr, gold_object_t action)
-{
-	gold_request_t *gold_request = create_gold_request(GOLD_OBJECT_JOB,
-							   action);
-	gold_response_t *gold_response = NULL;
-	char tmp_buff[50];
-	int rc = SLURM_ERROR;
-	char *jname = NULL;
-	char *nodes = "(null)";
-
-	if(!gold_request) 
-		return rc;
-
-	if (job_ptr->nodes && job_ptr->nodes[0])
-		nodes = job_ptr->nodes;
-	
-	
-//info("total procs is  %d", job_ptr->total_procs);
-	if(action == GOLD_ACTION_CREATE) {
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->job_id);
-		gold_request_add_assignment(gold_request, "JobId", tmp_buff);
-		
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-			 (int)job_ptr->details->submit_time);
-		gold_request_add_assignment(gold_request, "SubmitTime",
-					    tmp_buff);
-	} else if (action == GOLD_ACTION_MODIFY) {
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->job_id);
-		gold_request_add_condition(gold_request, "JobId", tmp_buff,
-					   GOLD_OPERATOR_NONE, 0);
-		
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-			 (int)job_ptr->details->submit_time);
-		gold_request_add_condition(gold_request, "SubmitTime",
-					   tmp_buff,
-					   GOLD_OPERATOR_NONE, 0);
-	} else {
-		destroy_gold_request(gold_request);
-		error("_add_edit_job: bad action given %d", action);		
-		return rc;
-	}
-
-	if (job_ptr->name && job_ptr->name[0]) {
-		int i;
-		jname = xmalloc(strlen(job_ptr->name) + 1);
-		for (i=0; job_ptr->name[i]; i++) {
-			if (isalnum(job_ptr->name[i]))
-				jname[i] = job_ptr->name[i];
-			else
-				jname[i] = '_';
-		}
-	} else
-		jname = xstrdup("allocation");
-
-	gold_request_add_assignment(gold_request, "JobName", jname);
-	xfree(jname);
-	
-	gold_request_add_assignment(gold_request, "Partition",
-				    job_ptr->partition);
-	
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-		 job_ptr->total_procs);
-	gold_request_add_assignment(gold_request, "RequestedCPUCount",
-				    tmp_buff);
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-		 job_ptr->total_procs);
-	gold_request_add_assignment(gold_request, "AllocatedCPUCount",
-				    tmp_buff);
-	
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-		 (int)job_ptr->details->begin_time);
-	gold_request_add_assignment(gold_request, "EligibleTime",
-				    tmp_buff);
-
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->assoc_id);
-	gold_request_add_assignment(gold_request, "GoldAccountId", tmp_buff);
-
-	gold_request_add_assignment(gold_request, "NodeList", nodes);
-
-	if(job_ptr->job_state >= JOB_COMPLETE) {
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-			 (int)job_ptr->end_time);
-		gold_request_add_assignment(gold_request, "EndTime",
-					    tmp_buff);		
-		
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-			 (int)job_ptr->exit_code);
-		gold_request_add_assignment(gold_request, "ExitCode",
-					    tmp_buff);
-	}
-
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-		 (int)job_ptr->start_time);
-	gold_request_add_assignment(gold_request, "StartTime", tmp_buff);
-		
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-		 job_ptr->job_state & (~JOB_COMPLETING));
-	gold_request_add_assignment(gold_request, "State", tmp_buff);	
-
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("_add_edit_job: no response received");
-		return rc;
-	}
-
-	if(!gold_response->rc) 
-		rc = SLURM_SUCCESS;
-	else {
-		if(gold_response->rc == 720)
-			error("gold_response has non-zero rc(%d): "
-			      "NOT PRINTING MESSAGE: this was a parser error",
-			      gold_response->rc);
-		else
-			error("gold_response has non-zero rc(%d): %s",
-			      gold_response->rc,
-			      gold_response->message);
-		errno = gold_response->rc;
-	}
-	destroy_gold_response(gold_response);
-
-	return rc;
-}
-
-static int _check_for_job(uint32_t jobid, time_t submit) 
-{
-	gold_request_t *gold_request = create_gold_request(GOLD_OBJECT_JOB,
-							   GOLD_ACTION_QUERY);
-	gold_response_t *gold_response = NULL;
-	char tmp_buff[50];
-	int rc = 0;
-
-	if(!gold_request) 
-		return rc;
-
-	gold_request_add_selection(gold_request, "JobId");
-
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u", jobid);
-	gold_request_add_condition(gold_request, "JobId", tmp_buff,
-				   GOLD_OPERATOR_NONE, 0);
-
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u", (int)submit);
-	gold_request_add_condition(gold_request, "SubmitTime", tmp_buff,
-				   GOLD_OPERATOR_NONE, 0);
-
-	gold_response = get_gold_response(gold_request);
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("_check_for_job: no response received");
-		return 0;
-	}
-
-	if(gold_response->entry_cnt > 0) 
-		rc = 1;
-	destroy_gold_response(gold_response);
-	
-	return rc;
-}
-
-static List _get_association_list_from_response(gold_response_t *gold_response)
-{
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	List association_list = NULL;
-	acct_association_rec_t *acct_rec = NULL;
-	gold_response_entry_t *resp_entry = NULL;
-	gold_name_value_t *name_val = NULL;
-	
-	association_list = list_create(destroy_acct_association_rec);
-	
-	itr = list_iterator_create(gold_response->entries);
-	while((resp_entry = list_next(itr))) {
-		acct_rec = xmalloc(sizeof(acct_association_rec_t));
-
-		itr2 = list_iterator_create(resp_entry->name_val);
-		while((name_val = list_next(itr2))) {
-			if(!strcmp(name_val->name, "Id")) {
-				acct_rec->id = 
-					atoi(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "FairShare")) {
-				acct_rec->fairshare = 
-					atoi(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "MaxJobs")) {
-				acct_rec->max_jobs = 
-					atoi(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "MaxNodesPerJob")) {
-				acct_rec->max_nodes_per_job = 
-					atoi(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "MaxWallDurationPerJob")) {
-				acct_rec->max_wall_duration_per_job = 
-					atoi(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "MaxProcSecondsPerJob")) {
-				acct_rec->max_cpu_secs_per_job = 
-					atoi(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "User")) {
-				if(strcmp(name_val->name, "NONE"))
-					acct_rec->user = 
-						xstrdup(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "Project")) {
-				acct_rec->acct = 
-					xstrdup(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "Machine")) {
-				acct_rec->cluster = 
-					xstrdup(name_val->value);
-			} else {
-				error("Unknown name val of '%s' = '%s'",
-				      name_val->name, name_val->value);
-			}
-		}
-		list_iterator_destroy(itr2);
-		list_append(association_list, acct_rec);
-	}
-	list_iterator_destroy(itr);
-
-	return association_list;
-}
-
-/* static int _get_cluster_accounting_list_from_response( */
-/* 	gold_response_t *gold_response, */
-/* 	acct_cluster_rec_t *cluster_rec) */
-/* { */
-/* 	ListIterator itr = NULL; */
-/* 	ListIterator itr2 = NULL; */
-/* 	cluster_accounting_rec_t *clusteracct_rec = NULL; */
-/* 	gold_response_entry_t *resp_entry = NULL; */
-/* 	gold_name_value_t *name_val = NULL; */
-	
-/* 	if(gold_response->entry_cnt <= 0) { */
-/* 		debug2("_get_list_from_response: No entries given"); */
-/* 		return SLURM_ERROR; */
-/* 	} */
-/* 	if(!cluster_rec->accounting_list) */
-/* 		cluster_rec->accounting_list =  */
-/* 			list_create(destroy_cluster_accounting_rec); */
-	
-/* 	itr = list_iterator_create(gold_response->entries); */
-/* 	while((resp_entry = list_next(itr))) { */
-/* 		clusteracct_rec = xmalloc(sizeof(cluster_accounting_rec_t)); */
-/* 		itr2 = list_iterator_create(resp_entry->name_val); */
-/* 		while((name_val = list_next(itr2))) { */
-/* 			if(!strcmp(name_val->name, "CPUCount")) { */
-/* 				clusteracct_rec->cpu_count =  */
-/* 					atoi(name_val->value); */
-/* 			} else if(!strcmp(name_val->name,  */
-/* 					  "PeriodStart")) { */
-/* 				clusteracct_rec->period_start =  */
-/* 					atoi(name_val->value); */
-/* 			} else if(!strcmp(name_val->name,  */
-/* 					  "IdleCPUSeconds")) { */
-/* 				clusteracct_rec->idle_secs =  */
-/* 					atoi(name_val->value); */
-/* 			} else if(!strcmp(name_val->name,  */
-/* 					  "DownCPUSeconds")) { */
-/* 				clusteracct_rec->down_secs =  */
-/* 					atoi(name_val->value); */
-/* 			} else if(!strcmp(name_val->name,  */
-/* 					  "AllocatedCPUSeconds")) { */
-/* 				clusteracct_rec->alloc_secs =  */
-/* 					atoi(name_val->value); */
-/* 			} else if(!strcmp(name_val->name,  */
-/* 					  "ReservedCPUSeconds")) { */
-/* 				clusteracct_rec->resv_secs =  */
-/* 					atoi(name_val->value); */
-/* 			} else { */
-/* 				error("Unknown name val of '%s' = '%s'", */
-/* 				      name_val->name, name_val->value); */
-/* 			} */
-/* 		} */
-/* 		list_iterator_destroy(itr2); */
-/* 		list_append(cluster_rec->accounting_list, clusteracct_rec); */
-/* 	} */
-/* 	list_iterator_destroy(itr); */
-
-/* 	return SLURM_SUCCESS; */
-/* } */
-
-/* static int _get_acct_accounting_list_from_response( */
-/* 	gold_response_t *gold_response, */
-/* 	acct_association_rec_t *acct_rec) */
-/* { */
-/* 	ListIterator itr = NULL; */
-/* 	ListIterator itr2 = NULL; */
-/* 	acct_accounting_rec_t *accounting_rec = NULL; */
-/* 	gold_response_entry_t *resp_entry = NULL; */
-/* 	gold_name_value_t *name_val = NULL; */
-	
-/* 	if(!acct_rec->accounting_list) */
-/* 		acct_rec->accounting_list = */
-/* 			list_create(destroy_acct_accounting_rec); */
-	
-/* 	itr = list_iterator_create(gold_response->entries); */
-/* 	while((resp_entry = list_next(itr))) { */
-/* 		accounting_rec = xmalloc(sizeof(acct_accounting_rec_t)); */
-
-/* 		itr2 = list_iterator_create(resp_entry->name_val); */
-/* 		while((name_val = list_next(itr2))) { */
-/* 			if(!strcmp(name_val->name, "PeriodStart")) { */
-/* 				accounting_rec->period_start =  */
-/* 					atoi(name_val->value); */
-/* 			} else if(!strcmp(name_val->name, */
-/* 					  "AllocatedCPUSeconds")) { */
-/* 				accounting_rec->alloc_secs =  */
-/* 					atoi(name_val->value); */
-/* 			} else { */
-/* 				error("Unknown name val of '%s' = '%s'", */
-/* 				      name_val->name, name_val->value); */
-/* 			} */
-/* 		} */
-/* 		list_iterator_destroy(itr2); */
-/* 		list_append(acct_rec->accounting_list, accounting_rec); */
-/* 	} */
-/* 	list_iterator_destroy(itr); */
-
-/* 	return SLURM_SUCCESS; */
-	
-/* } */
-
-static List _get_user_list_from_response(gold_response_t *gold_response)
-{
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	List user_list = NULL;
-	acct_user_rec_t *user_rec = NULL;
-	gold_response_entry_t *resp_entry = NULL;
-	gold_name_value_t *name_val = NULL;
-	
-	user_list = list_create(destroy_acct_user_rec);
-	
-	itr = list_iterator_create(gold_response->entries);
-	while((resp_entry = list_next(itr))) {
-		user_rec = xmalloc(sizeof(acct_user_rec_t));
-
-		itr2 = list_iterator_create(resp_entry->name_val);
-		while((name_val = list_next(itr2))) {
-			if(!strcmp(name_val->name, "Name")) {
-				user_rec->name = 
-					xstrdup(name_val->value);
-			} /* else if(!strcmp(name_val->name, "Expedite")) { */
-/* 				if(user_rec->qos_list) */
-/* 					continue; */
-/* 				user_rec->qos_list =  */
-/* 					list_create(slurm_destroy_char);  */
-/* 				/\*really needs to have 1 added here */
-/* 				  but we shouldn't ever need to use */
-/* 				  this. */
-/* 				*\/ */
-/* 				slurm_addto_char_list(user_rec->qos_list, */
-/* 						      name_val->value); */
-/* 			}  */else if(!strcmp(name_val->name, "DefaultProject")) {
-				user_rec->default_acct = 
-					xstrdup(name_val->value);
-			} else {
-				error("Unknown name val of '%s' = '%s'",
-				      name_val->name, name_val->value);
-			}
-		}
-		list_iterator_destroy(itr2);
-		list_append(user_list, user_rec);
-	}
-	list_iterator_destroy(itr);
-
-	return user_list;
-}
-
-static List _get_acct_list_from_response(gold_response_t *gold_response)
-{
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	List acct_list = NULL;
-	acct_account_rec_t *acct_rec = NULL;
-	gold_response_entry_t *resp_entry = NULL;
-	gold_name_value_t *name_val = NULL;
-	
-	acct_list = list_create(destroy_acct_account_rec);
-	
-	itr = list_iterator_create(gold_response->entries);
-	while((resp_entry = list_next(itr))) {
-		acct_rec = xmalloc(sizeof(acct_account_rec_t));
-
-		itr2 = list_iterator_create(resp_entry->name_val);
-		while((name_val = list_next(itr2))) {
-			/* if(!strcmp(name_val->name, "Expedite")) { */
-/* 				acct_rec->qos =  */
-/* 					atoi(name_val->value)+1; */
-/* 			} else */ if(!strcmp(name_val->name, 
-					  "Name")) {
-				acct_rec->name = 
-					xstrdup(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "Organization")) {
-				acct_rec->organization = 
-					xstrdup(name_val->value);
-			} else if(!strcmp(name_val->name, 
-					  "Description")) {
-				acct_rec->description = 
-					xstrdup(name_val->value);
-			} else {
-				error("Unknown name val of '%s' = '%s'",
-				      name_val->name, name_val->value);
-			}
-		}
-		list_iterator_destroy(itr2);
-		list_append(acct_list, acct_rec);
-	}
-	list_iterator_destroy(itr);
-
-	return acct_list;
-}
-
-static List _get_cluster_list_from_response(gold_response_t *gold_response)
-{
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	List cluster_list = NULL;
-	acct_cluster_rec_t *cluster_rec = NULL;
-	gold_response_entry_t *resp_entry = NULL;
-	gold_name_value_t *name_val = NULL;
-	
-	cluster_list = list_create(destroy_acct_cluster_rec);
-	
-	itr = list_iterator_create(gold_response->entries);
-	while((resp_entry = list_next(itr))) {
-		cluster_rec = xmalloc(sizeof(acct_cluster_rec_t));
-
-		itr2 = list_iterator_create(resp_entry->name_val);
-		while((name_val = list_next(itr2))) {
-			if(!strcmp(name_val->name, 
-					  "Name")) {
-				cluster_rec->name = 
-					xstrdup(name_val->value);
-			} else {
-				error("Unknown name val of '%s' = '%s'",
-				      name_val->name, name_val->value);
-			}
-		}
-		list_iterator_destroy(itr2);
-		list_append(cluster_list, cluster_rec);
-	}
-	list_iterator_destroy(itr);
-
-	return cluster_list;
-}
-
-static int _remove_association_accounting(List id_list)
-{
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	int rc = SLURM_SUCCESS;
-	char *object = NULL;
-	int set = 0;
-	ListIterator itr = NULL;
-
-	gold_request = create_gold_request(GOLD_OBJECT_ACCT_HOUR_USAGE,
-					   GOLD_ACTION_DELETE);
-	if(!gold_request) { 
-		error("couldn't create gold_request");
-		rc = SLURM_ERROR;
-		return rc;
-	}
-	
-	if(id_list && list_count(id_list)) {
-		itr = list_iterator_create(id_list);
-		if(list_count(id_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Acct",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-			       
-	gold_response = get_gold_response(gold_request);	
-
-	if(!gold_response) {
-		error("acct_storage_p_modify_associations: "
-		      "no response received");
-		destroy_gold_request(gold_request);
-		rc = SLURM_ERROR;
-		return rc;
-	}
-		
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		destroy_gold_request(gold_request);
-		destroy_gold_response(gold_response);
-		rc = SLURM_ERROR;
-		return rc;
-	}
-
-	destroy_gold_response(gold_response);
-
-	gold_request->object = GOLD_OBJECT_ACCT_DAY_USAGE;	
-	gold_response = get_gold_response(gold_request);	
-	
-	if(!gold_response) {
-		error("acct_storage_p_modify_associations: "
-		      "no response received");
-		destroy_gold_request(gold_request);
-		rc = SLURM_ERROR;
-		return rc;
-	}
-		
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		destroy_gold_request(gold_request);
-		destroy_gold_response(gold_response);
-		rc = SLURM_ERROR;
-		return rc;
-	}
-	destroy_gold_response(gold_response);
-	
-	gold_request->object = GOLD_OBJECT_ACCT_MONTH_USAGE;	
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-		
-	if(!gold_response) {
-		error("acct_storage_p_modify_associations: "
-		      "no response received");
-		destroy_gold_request(gold_request);
-		rc = SLURM_ERROR;
-		return rc;
-	}
-		
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		rc = SLURM_ERROR;
-	}
-
-	destroy_gold_response(gold_response);		
-
-
-	return rc; 
-}
-
-/*
- * init() is called when the plugin is loaded, before any other functions
- * are called.  Put global initialization here.
- */
-extern int init ( void )
-{
-	char *keyfile = NULL;
-	char *host = NULL;
-	uint32_t port = 0;
-	struct	stat statbuf;
-
-	if(!(keyfile = slurm_get_accounting_storage_pass()) 
-	   || strlen(keyfile) < 1) {
-		keyfile = xstrdup("/etc/gold/auth_key");
-		debug2("No keyfile specified with AcctStoragePass, "
-		       "gold using default %s", keyfile);
-	}
-	
-
-	if(stat(keyfile, &statbuf)) {
-		fatal("Can't stat key file %s. "
-		      "To run acct_storage/gold you have to set "
-		      "your gold keyfile as "
-		      "AcctStoragePass in your slurm.conf", keyfile);
-	}
-
-
-	if(!(host = slurm_get_accounting_storage_host())) {
-		host = xstrdup("localhost");
-		debug2("No host specified with AcctStorageHost, "
-		       "gold using default %s", host);
-	}
-
-	if(!(port = slurm_get_accounting_storage_port())) {
-		port = 7112;
-		debug2("No port specified with AcctStoragePort, "
-		       "gold using default %u", port);
-	}
-
-	debug2("connecting to gold with keyfile='%s' for %s(%d)",
-	       keyfile, host, port);
-
-	init_gold(keyfile, host, port);
-
-	xfree(keyfile);
-	xfree(host);
-
-	verbose("%s loaded", plugin_name);
-	return SLURM_SUCCESS;
-}
-
-extern int fini ( void )
-{
-	if(local_association_list)
-		list_destroy(local_association_list);
-	fini_gold();
-	return SLURM_SUCCESS;
-}
-
-extern void * acct_storage_p_get_connection(bool make_agent, bool rollback)
-{
-	return NULL;
-}
-
-extern int acct_storage_p_close_connection(void **db_conn)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int acct_storage_p_commit(void *db_conn, bool commit)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int acct_storage_p_add_users(void *db_conn,
-				    List user_list)
-{
-	ListIterator itr = NULL;
-	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	acct_user_rec_t *object = NULL;
-//	char tmp_buff[50];
-
-	itr = list_iterator_create(user_list);
-	while((object = list_next(itr))) {
-		if(!object->name || !object->default_acct) {
-			error("We need a user name and "
-			      "default acct to add.");
-			rc = SLURM_ERROR;
-			continue;
-		}
-		gold_request = create_gold_request(GOLD_OBJECT_USER,
-						   GOLD_ACTION_CREATE);
-		if(!gold_request) { 
-			error("couldn't create gold_request");
-			rc = SLURM_ERROR;
-			break;
-		}
-		gold_request_add_assignment(gold_request, "Name",
-					    object->name);		
-		gold_request_add_assignment(gold_request, "DefaultProject",
-					    object->default_acct);		
-
-/* 		if(object->qos != ACCT_QOS_NOTSET) { */
-/* 			snprintf(tmp_buff, sizeof(tmp_buff), "%u", */
-/* 				 object->qos-1); */
-/* 			gold_request_add_assignment(gold_request, "Expedite", */
-/* 						    tmp_buff); */
-/* 		}		 */
-		gold_response = get_gold_response(gold_request);	
-		destroy_gold_request(gold_request);
-
-		if(!gold_response) {
-			error("acct_storage_p_add_users: "
-			      "no response received");
-			rc = SLURM_ERROR;
-			break;
-		}
-		
-		if(gold_response->rc) {
-			error("gold_response has non-zero rc(%d): %s",
-			      gold_response->rc,
-			      gold_response->message);
-			errno = gold_response->rc;
-			destroy_gold_response(gold_response);
-			rc = SLURM_ERROR;
-			break;
-		}
-		destroy_gold_response(gold_response);		
-	}
-	list_iterator_destroy(itr);
-	
-	return rc;
-}
-
-extern int acct_storage_p_add_coord(void *db_conn,
-				    char *acct,
-				    acct_user_cond_t *user_q)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int acct_storage_p_add_accts(void *db_conn,
-				    List acct_list)
-{
-	ListIterator itr = NULL;
-	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	acct_account_rec_t *object = NULL;
-//	char tmp_buff[50];
-
-	itr = list_iterator_create(acct_list);
-	while((object = list_next(itr))) {
-		if(!object->name || !object->description
-		   || !object->organization) {
-			error("We need a acct name, description, and "
-			      "organization to add one.");
-			rc = SLURM_ERROR;
-			continue;
-		}
-		gold_request = create_gold_request(GOLD_OBJECT_PROJECT,
-						   GOLD_ACTION_CREATE);
-		if(!gold_request) { 
-			error("couldn't create gold_request");
-			rc = SLURM_ERROR;
-			break;
-		}
-		gold_request_add_assignment(gold_request, "Name",
-					    object->name);		
-		gold_request_add_assignment(gold_request, "Description",
-					    object->description);		
-		gold_request_add_assignment(gold_request, "Organization",
-					    object->organization);		
-/* 		if(object->qos != ACCT_QOS_NOTSET) { */
-/* 			snprintf(tmp_buff, sizeof(tmp_buff), "%u", */
-/* 				 object->qos-1); */
-/* 			gold_request_add_assignment(gold_request, "Expedite", */
-/* 						    tmp_buff); */
-/* 		}		 */
-		gold_response = get_gold_response(gold_request);	
-		destroy_gold_request(gold_request);
-
-		if(!gold_response) {
-			error("acct_storage_p_add_accts: "
-			      "no response received");
-			rc = SLURM_ERROR;
-			break;
-		}
-		
-		if(gold_response->rc) {
-			error("gold_response has non-zero rc(%d): %s",
-			      gold_response->rc,
-			      gold_response->message);
-			errno = gold_response->rc;
-			destroy_gold_response(gold_response);
-			rc = SLURM_ERROR;
-			break;
-		}
-		destroy_gold_response(gold_response);		
-	}
-	list_iterator_destroy(itr);
-	
-	return rc;
-}
-
-extern int acct_storage_p_add_clusters(void *db_conn,
-				       List cluster_list)
-{
-	ListIterator itr = NULL;
-	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	acct_cluster_rec_t *object = NULL;
-
-	itr = list_iterator_create(cluster_list);
-	while((object = list_next(itr))) {
-		if(!object->name) {
-			error("We need a cluster name to add.");
-			rc = SLURM_ERROR;
-			continue;
-		}
-		gold_request = create_gold_request(GOLD_OBJECT_MACHINE,
-						   GOLD_ACTION_CREATE);
-		if(!gold_request) { 
-			error("couldn't create gold_request");
-			rc = SLURM_ERROR;
-			break;
-		}
-		gold_request_add_assignment(gold_request, "Name",
-					    object->name);		
-
-		gold_response = get_gold_response(gold_request);	
-		destroy_gold_request(gold_request);
-
-		if(!gold_response) {
-			error("acct_storage_p_add_clusters: "
-			      "no response received");
-			rc = SLURM_ERROR;
-			break;
-		}
-		
-		if(gold_response->rc) {
-			error("gold_response has non-zero rc(%d): %s",
-			      gold_response->rc,
-			      gold_response->message);
-			errno = gold_response->rc;
-			destroy_gold_response(gold_response);
-			rc = SLURM_ERROR;
-			break;
-		}
-		destroy_gold_response(gold_response);		
-	}
-	list_iterator_destroy(itr);
-	
-	return rc;
-}
-
-extern int acct_storage_p_add_associations(void *db_conn,
-					   List association_list)
-{
-	ListIterator itr = NULL;
-	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	acct_association_rec_t *object = NULL;
-	char tmp_buff[50];
-
-	itr = list_iterator_create(association_list);
-	while((object = list_next(itr))) {
-		if(!object->cluster || !object->acct) {
-			error("We need a association cluster and "
-			      "acct to add one.");
-			rc = SLURM_ERROR;
-			continue;
-		}
-		gold_request = create_gold_request(GOLD_OBJECT_ACCT,
-						   GOLD_ACTION_CREATE);
-		if(!gold_request) { 
-			error("couldn't create gold_request");
-			rc = SLURM_ERROR;
-			break;
-		}
-		if(object->user) {
-			gold_request_add_assignment(gold_request, "User",
-						    object->user);		
-			snprintf(tmp_buff, sizeof(tmp_buff), 
-				 "%s on %s for %s",
-				 object->acct,
-				 object->cluster,
-				 object->user);
-		} else if(object->parent_acct)
-			snprintf(tmp_buff, sizeof(tmp_buff), 
-				 "%s of %s on %s",
-				 object->acct,
-				 object->parent_acct,
-				 object->cluster);
-		else
-			snprintf(tmp_buff, sizeof(tmp_buff), 
-				 "%s on %s",
-				 object->acct,
-				 object->cluster);
-			
-		gold_request_add_assignment(gold_request, "Name", tmp_buff);
-
-		gold_request_add_assignment(gold_request, "Project",
-					    object->acct);		
-		gold_request_add_assignment(gold_request, "Machine",
-					    object->cluster);	
-			
-		if(object->fairshare) {
-			snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-				 object->fairshare);
-			gold_request_add_assignment(gold_request, "FairShare",
-						    tmp_buff);		
-		}
-
-		if(object->max_jobs) {
-			snprintf(tmp_buff, sizeof(tmp_buff), "%u", 
-				 object->max_jobs);
-			gold_request_add_assignment(gold_request, "MaxJobs",
-						    tmp_buff);
-		}
-		
-		if(object->max_nodes_per_job) {
-			snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-				 object->max_nodes_per_job);
-			gold_request_add_assignment(gold_request,
-						    "MaxNodesPerJob",
-						    tmp_buff);
-		}
-
-		if(object->max_wall_duration_per_job) {
-			snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-				 object->max_wall_duration_per_job);
-			gold_request_add_assignment(gold_request,
-						    "MaxWallDurationPerJob",
-						    tmp_buff);		
-		}
-
-		if(object->max_cpu_secs_per_job) {
-			snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-				 object->max_cpu_secs_per_job);
-			gold_request_add_assignment(gold_request,
-						    "MaxProcSecondsPerJob",
-						    tmp_buff);		
-		}
-
-		gold_response = get_gold_response(gold_request);	
-		destroy_gold_request(gold_request);
-
-		if(!gold_response) {
-			error("acct_storage_p_add_associations: "
-			      "no response received");
-			rc = SLURM_ERROR;
-			break;
-		}
-		
-		if(gold_response->rc) {
-			error("gold_response has non-zero rc(%d): %s",
-			      gold_response->rc,
-			      gold_response->message);
-			errno = gold_response->rc;
-			destroy_gold_response(gold_response);
-			rc = SLURM_ERROR;
-			break;
-		}
-		destroy_gold_response(gold_response);		
-	}
-	list_iterator_destroy(itr);
-	
-	return rc;
-}
-
-extern int acct_storage_p_add_qos(void *db_conn, uint32_t uid, 
-				  List qos_list)
-{
-	return SLURM_SUCCESS;
-}
-
-extern List acct_storage_p_modify_users(void *db_conn,
-					acct_user_cond_t *user_q,
-					acct_user_rec_t *user)
-{
-	ListIterator itr = NULL;
-//	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char *object = NULL;
-//	char tmp_buff[50];
-	int set = 0;
-
-	if(!user_q) {
-		error("acct_storage_p_modify_users: "
-		      "we need conditions to modify");
-		return NULL;
-	}
-
-	if(!user) {
-		error("acct_storage_p_modify_users: "
-		      "we need something to change");
-		return NULL;
-	}
-
-	gold_request = create_gold_request(GOLD_OBJECT_USER,
-					   GOLD_ACTION_MODIFY);
-	if(!gold_request) { 
-		error("acct_storage_p_modify_users: "
-		      "couldn't create gold_request");
-		return NULL;
-	}
-
-	if(user_q->assoc_cond->user_list
-	   && list_count(user_q->assoc_cond->user_list)) {
-		itr = list_iterator_create(user_q->assoc_cond->user_list);
-		if(list_count(user_q->assoc_cond->user_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(user_q->def_acct_list && list_count(user_q->def_acct_list)) {
-		itr = list_iterator_create(user_q->def_acct_list);
-		if(list_count(user_q->def_acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request,
-						   "DefaultProject",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(user->default_acct) 
-		gold_request_add_assignment(gold_request,
-					    "DefaultProject",
-					    user->default_acct);
-	
-/* 	if(user->qos != ACCT_QOS_NOTSET) { */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%u", */
-/* 			 user->qos-1); */
-/* 		gold_request_add_assignment(gold_request, "Expedite", */
-/* 					    tmp_buff);		 */
-/* 	} */
-
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("acct_storage_p_modify_users: "
-		      "no response received");
-		return NULL;
-	}
-	
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-	}
-
-	destroy_gold_response(gold_response);		
-	
-	return NULL;
-}
-
-extern List acct_storage_p_modify_user_admin_level(void *db_conn,
-						   acct_user_cond_t *user_q)
-{
-	ListIterator itr = NULL;
-//	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char *object = NULL;
-	int set = 0;
-
-	if(!user_q || user_q->admin_level == ACCT_ADMIN_NOTSET) {
-		error("acct_storage_p_modify_users: "
-		      "we need conditions to modify");
-		return NULL;
-	}
-
-	if(user_q->admin_level == ACCT_ADMIN_NONE) 
-		gold_request = create_gold_request(GOLD_OBJECT_ROLEUSER,
-						   GOLD_ACTION_DELETE);
-	else 
-		gold_request = create_gold_request(GOLD_OBJECT_ROLEUSER,
-						   GOLD_ACTION_CREATE);
-	
-	if(!gold_request) { 
-		error("couldn't create gold_request");
-		return NULL;
-	}
-
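-	/* ACCT_ADMIN_NONE strips both the SystemAdmin and Operator
-	 * roles from the matching users; the other levels create the
-	 * corresponding RoleUser entry instead. */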
-	if(user_q->admin_level == ACCT_ADMIN_NONE) {
-		gold_request_add_condition(gold_request,
-					   "Role",
-					   "SystemAdmin",
-					   GOLD_OPERATOR_NONE, 2);
-		
-		gold_request_add_condition(gold_request,
-					   "Role",
-					   "Operator",
-					   GOLD_OPERATOR_NONE, 1);
-	} else if(user_q->admin_level == ACCT_ADMIN_SUPER_USER)
-		gold_request_add_assignment(gold_request,
-					    "Role",
-					    "SystemAdmin");
-	else if(user_q->admin_level == ACCT_ADMIN_OPERATOR)
-		gold_request_add_assignment(gold_request,
-					    "Role",
-					    "Operator");
-	else {
-		error("acct_storage_p_modify_user_admin_level: "
-		      "unknown admin level %d", user_q->admin_level);
-		return NULL;
-	}
-
-	if(user_q->assoc_cond->user_list
-	   && list_count(user_q->assoc_cond->user_list)) {
-		itr = list_iterator_create(user_q->assoc_cond->user_list);
-		if(list_count(user_q->assoc_cond->user_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(user_q->def_acct_list && list_count(user_q->def_acct_list)) {
-		itr = list_iterator_create(user_q->def_acct_list);
-		if(list_count(user_q->def_acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request,
-						   "DefaultProject",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-	
-	if(!gold_response) {
-		error("acct_storage_p_modify_users: "
-		      "no response received");
-		return NULL;
-	}
-	
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		
-	}
-	destroy_gold_response(gold_response);	
-	
-	return NULL;
-}
-
-extern List acct_storage_p_modify_accts(void *db_conn,
-				       acct_account_cond_t *acct_q,
-				       acct_account_rec_t *acct)
-{
-	ListIterator itr = NULL;
-//	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-//	char tmp_buff[50];
-	int set = 0;
-	char *object = NULL;
-
-	if(!acct_q) {
-		error("acct_storage_p_modify_accts: "
-		      "we need conditions to modify");
-		return NULL;
-	}
-
-	if(!acct) {
-		error("acct_storage_p_modify_accts: "
-		      "we need something to change");
-		return NULL;
-	}
-	
-	gold_request = create_gold_request(GOLD_OBJECT_ACCT,
-					   GOLD_ACTION_MODIFY);
-	if(!gold_request) { 
-		error("couldn't create gold_request");
-		return NULL;
-	}
-
-	if(acct_q->assoc_cond->acct_list
-	   && list_count(acct_q->assoc_cond->acct_list)) {
-		itr = list_iterator_create(acct_q->assoc_cond->acct_list);
-		if(list_count(acct_q->assoc_cond->acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(acct_q->description_list 
-	   && list_count(acct_q->description_list)) {
-		itr = list_iterator_create(acct_q->description_list);
-		if(list_count(acct_q->description_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Description",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(acct_q->organization_list 
-	   && list_count(acct_q->organization_list)) {
-		itr = list_iterator_create(acct_q->organization_list);
-		if(list_count(acct_q->organization_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Organization",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(acct->description) 
-		gold_request_add_assignment(gold_request,
-					    "Description",
-					    acct->description);
-	if(acct->organization) 
-		gold_request_add_assignment(gold_request,
-					    "Organization",
-					    acct->organization);
-	
-/* 	if(acct->qos != ACCT_QOS_NOTSET) { */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%u", */
-/* 			 acct->qos-1); */
-/* 		gold_request_add_assignment(gold_request, "Expedite", */
-/* 					    tmp_buff);		 */
-/* 	} */
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-	
-	if(!gold_response) {
-		error("acct_storage_p_modify_accts: "
-		      "no response received");
-		return NULL;
-	}
-	
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		
-	}
-
-	destroy_gold_response(gold_response);		
-	
-	return NULL;
-}
-
-extern List acct_storage_p_modify_clusters(void *db_conn,
-					  acct_cluster_cond_t *cluster_q,
-					  acct_cluster_rec_t *cluster)
-{
-	return NULL;
-}
-
-extern List acct_storage_p_modify_associations(void *db_conn,
-					      acct_association_cond_t *assoc_q,
-					      acct_association_rec_t *assoc)
-{
-	ListIterator itr = NULL;
-//	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char tmp_buff[50];
-	char *object = NULL;
-	int set = 0;
-
-	if(!assoc_q) {
-		error("acct_storage_p_modify_associations: "
-		      "we need conditions to modify");
-		return NULL;
-	}
-
-	if(!assoc) {
-		error("acct_storage_p_modify_associations: "
-		      "we need something to change");
-		return NULL;
-	}
-
-	gold_request = create_gold_request(GOLD_OBJECT_ACCT,
-					   GOLD_ACTION_MODIFY);
-	if(!gold_request) { 
-		error("couldn't create gold_request");
-		return NULL;
-	}
-
-	if(assoc_q->id_list && list_count(assoc_q->id_list)) {
-		itr = list_iterator_create(assoc_q->id_list);
-		if(list_count(assoc_q->id_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Id",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->user_list && list_count(assoc_q->user_list)) {
-		itr = list_iterator_create(assoc_q->user_list);
-		if(list_count(assoc_q->user_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "User",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->acct_list && list_count(assoc_q->acct_list)) {
-		itr = list_iterator_create(assoc_q->acct_list);
-		if(list_count(assoc_q->acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Project",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) {
-		itr = list_iterator_create(assoc_q->cluster_list);
-		if(list_count(assoc_q->cluster_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Machine",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc->fairshare) {
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-			 assoc->fairshare);
-		gold_request_add_assignment(gold_request, "Fairshare",
-					    tmp_buff);		
-	}
-
-	if(assoc->max_jobs) {
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u", 
-			 assoc->max_jobs);
-		gold_request_add_assignment(gold_request, "MaxJobs",
-					    tmp_buff);
-	}
-		
-	if(assoc->max_nodes_per_job) {
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-			 assoc->max_nodes_per_job);
-		gold_request_add_assignment(gold_request,
-					    "MaxNodesPerJob",
-					    tmp_buff);
-	}
-
-	if(assoc->max_wall_duration_per_job) {
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-			 assoc->max_wall_duration_per_job);
-		gold_request_add_assignment(gold_request,
-					    "MaxWallDurationPerJob",
-					    tmp_buff);		
-	}
-
-	if(assoc->max_cpu_secs_per_job) {
-		snprintf(tmp_buff, sizeof(tmp_buff), "%u",
-			 assoc->max_cpu_secs_per_job);
-		gold_request_add_assignment(gold_request,
-					    "MaxProcSecondsPerJob",
-					    tmp_buff);		
-	}
-
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("acct_storage_p_modify_associations: "
-		      "no response received");
-		return NULL;
-	}
-		
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		
-	}
-	destroy_gold_response(gold_response);		
-	
-	return NULL;
-}
-
-extern List acct_storage_p_remove_users(void *db_conn,
-				       acct_user_cond_t *user_q)
-{
-	ListIterator itr = NULL;
-//	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char *object = NULL;
-	int set = 0;
-
-	if(!user_q) {
-		error("acct_storage_p_remove_users: "
-		      "we need conditions to remove");
-		return NULL;
-	}
-
-	gold_request = create_gold_request(GOLD_OBJECT_USER,
-					   GOLD_ACTION_DELETE);
-	if(!gold_request) { 
-		error("acct_storage_p_remove_users: "
-		      "couldn't create gold_request");
-		return NULL;
-	}
-	
-	if(user_q->assoc_cond->user_list 
-	   && list_count(user_q->assoc_cond->user_list)) {
-		itr = list_iterator_create(user_q->assoc_cond->user_list);
-		if(list_count(user_q->assoc_cond->user_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(user_q->def_acct_list && list_count(user_q->def_acct_list)) {
-		itr = list_iterator_create(user_q->def_acct_list);
-		if(list_count(user_q->def_acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request,
-						   "DefaultProject",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-	
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-	
-	if(!gold_response) {
-		error("acct_storage_p_remove_users: "
-		      "no response received");
-		return NULL;
-	}
-		
-	if(gold_response->rc) {
-		error("acct_storage_p_remove_users: "
-		      "gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		
-	}
-	destroy_gold_response(gold_response);		
-		
-	return NULL;
-}
-
-extern List acct_storage_p_remove_coord(void *db_conn,
-				       char *acct,
-				       acct_user_cond_t *user_q)
-{
-	return NULL;
-}
-
-extern List acct_storage_p_remove_accts(void *db_conn,
-				       acct_account_cond_t *acct_q)
-{
-	ListIterator itr = NULL;
-//	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char *object = NULL;
-	int set = 0;
-
-	if(!acct_q) {
-		error("acct_storage_p_remove_accts: "
-		      "we need conditions to remove");
-		return NULL;
-	}
-
-	gold_request = create_gold_request(GOLD_OBJECT_PROJECT,
-					   GOLD_ACTION_DELETE);
-	if(!gold_request) { 
-		error("acct_storage_p_remove_accts: "
-		      "couldn't create gold_request");
-		return NULL;
-	}
-	
-	if(acct_q->assoc_cond->acct_list
-	   && list_count(acct_q->assoc_cond->acct_list)) {
-		itr = list_iterator_create(acct_q->assoc_cond->acct_list);
-		if(list_count(acct_q->assoc_cond->acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(acct_q->description_list 
-	   && list_count(acct_q->description_list)) {
-		itr = list_iterator_create(acct_q->description_list);
-		if(list_count(acct_q->description_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Description",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(acct_q->organization_list 
-	   && list_count(acct_q->organization_list)) {
-		itr = list_iterator_create(acct_q->organization_list);
-		if(list_count(acct_q->organization_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Organization",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-	
-	if(!gold_response) {
-		error("acct_storage_p_remove_accts: "
-		      "no response received");
-		return NULL;
-	}
-	
-	if(gold_response->rc) {
-		error("acct_storage_p_remove_accts: "
-		      "gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		
-	}
-	destroy_gold_response(gold_response);		
-		
-	return NULL;
-}
-
-extern List acct_storage_p_remove_clusters(void *db_conn,
-					  acct_cluster_cond_t *cluster_q)
-{
-	ListIterator itr = NULL;
-//	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char *object = NULL;
-	int set = 0;
-
-	if(!cluster_q) {
-		error("acct_storage_p_modify_clusters: "
-		      "we need conditions to modify");
-		return NULL;
-	}
-
-	gold_request = create_gold_request(GOLD_OBJECT_MACHINE,
-					   GOLD_ACTION_DELETE);
-	if(!gold_request) { 
-		error("acct_storage_p_remove_clusters: "
-		      "couldn't create gold_request");
-		return NULL;
-	}
-	
-	if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) {
-		itr = list_iterator_create(cluster_q->cluster_list);
-		if(list_count(cluster_q->cluster_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-		
-	if(!gold_response) {
-		error("acct_storage_p_remove_clusters: "
-		      "no response received");
-		return NULL;
-	}
-	
-	if(gold_response->rc) {
-		error("acct_storage_p_remove_clusters: "
-		      "gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		destroy_gold_response(gold_response);
-		return NULL;
-	}
-	destroy_gold_response(gold_response);
-
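-	/* Also purge the accumulated usage records for these machines:
-	 * delete the hourly usage first, then reuse the same request
-	 * for the daily and monthly usage tables. */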
-	gold_request = create_gold_request(GOLD_OBJECT_MACHINE_HOUR_USAGE,
-					   GOLD_ACTION_DELETE);
-	if(!gold_request) { 
-		error("acct_storage_p_remove_clusters: "
-		      "couldn't create gold_request");
-		return NULL;
-	}
-	
-	if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) {
-		itr = list_iterator_create(cluster_q->cluster_list);
-		if(list_count(cluster_q->cluster_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Machine",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-	gold_response = get_gold_response(gold_request);	
-	
-	if(!gold_response) {
-		error("acct_storage_p_remove_clusters: "
-		      "no response received");
-		destroy_gold_request(gold_request);
-		return NULL;
-	}
-		
-	if(gold_response->rc) {
-		error("acct_storage_p_remove_clusters: "
-		      "gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		destroy_gold_request(gold_request);
-		destroy_gold_response(gold_response);
-		return NULL;
-	}
-	destroy_gold_response(gold_response);
-
-	gold_request->object = GOLD_OBJECT_MACHINE_DAY_USAGE;
-	gold_response = get_gold_response(gold_request);	
-	if(!gold_response) {
-		error("acct_storage_p_remove_clusters: "
-		      "no response received");
-		destroy_gold_request(gold_request);
-		return NULL;
-	}
-		
-	if(gold_response->rc) {
-		error("acct_storage_p_remove_clusters: "
-		      "gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		destroy_gold_request(gold_request);
-		destroy_gold_response(gold_response);
-		return NULL;
-	}
-	
-	destroy_gold_response(gold_response);
-
-	gold_request->object = GOLD_OBJECT_MACHINE_MONTH_USAGE;
-	gold_response = get_gold_response(gold_request);	
-	if(!gold_response) {
-		error("acct_storage_p_remove_clusters: "
-		      "no response received");
-		destroy_gold_request(gold_request);
-		return NULL;
-	}
-		
-	if(gold_response->rc) {
-		error("acct_storage_p_remove_clusters: "
-		      "gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		
-	}
-	
-	destroy_gold_request(gold_request);
-	destroy_gold_response(gold_response);
-	
-	return NULL;
-}
-
-extern List acct_storage_p_remove_associations(void *db_conn,
-					      acct_association_cond_t *assoc_q)
-{
-	ListIterator itr = NULL;
-//	int rc = SLURM_SUCCESS;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char *object = NULL;
-	int set = 0;
-
-	if(!assoc_q) {
-		error("acct_storage_p_remove_associations: "
-		      "we need conditions to remove");
-		return NULL;
-	}
-
-	gold_request = create_gold_request(GOLD_OBJECT_ACCT,
-					   GOLD_ACTION_DELETE);
-	if(!gold_request) { 
-		error("couldn't create gold_request");
-		return NULL;
-	}
-
-	if(assoc_q->id_list && list_count(assoc_q->id_list)) {
-		itr = list_iterator_create(assoc_q->id_list);
-		if(list_count(assoc_q->id_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Id",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->user_list && list_count(assoc_q->user_list)) {
-		itr = list_iterator_create(assoc_q->user_list);
-		if(list_count(assoc_q->user_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "User",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->acct_list && list_count(assoc_q->acct_list)) {
-		itr = list_iterator_create(assoc_q->acct_list);
-		if(list_count(assoc_q->acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Project",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) {
-		itr = list_iterator_create(assoc_q->cluster_list);
-		if(list_count(assoc_q->cluster_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Machine",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-	
-	if(!gold_response) {
-		error("acct_storage_p_modify_associations: "
-		      "no response received");
-		return NULL;
-	}
-		
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		
-	}
-
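-	/* The delete response lists the removed associations; collect
-	 * their Id values so the matching accounting records can be
-	 * purged as well. */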
-	if(gold_response->entry_cnt > 0) {
-		ListIterator itr = NULL;
-		ListIterator itr2 = NULL;
-		gold_response_entry_t *resp_entry = NULL;
-		gold_name_value_t *name_val = NULL;
-		List id_list = list_create(slurm_destroy_char);
-
-		itr = list_iterator_create(gold_response->entries);
-		while((resp_entry = list_next(itr))) {
-			itr2 = list_iterator_create(
-				resp_entry->name_val);
-			while((name_val = list_next(itr2))) {
-				if(!strcmp(name_val->name, "Id")) {
-					list_push(id_list,
-						  xstrdup(name_val->value));
-					break;
-				}
-			}
-			list_iterator_destroy(itr2);			
-		}
-		list_iterator_destroy(itr);
-		_remove_association_accounting(id_list);
-		list_destroy(id_list);
-	} else {
-		debug3("no associations found");
-	}
-	destroy_gold_response(gold_response);		
-
-	return NULL;
-}
-
-extern List acct_storage_p_remove_qos(void *db_conn, uint32_t uid, 
-				      acct_qos_cond_t *qos_cond)
-{
-	return NULL;
-}
-
-extern List acct_storage_p_get_users(void *db_conn, uid_t uid,
-				     acct_user_cond_t *user_q)
-{
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	List user_list = NULL;
-	ListIterator itr = NULL;
-	char *object = NULL;
-	int set = 0;
-//	char tmp_buff[50];
-
-	gold_request = create_gold_request(GOLD_OBJECT_USER,
-					   GOLD_ACTION_QUERY);
-
-	if(!gold_request) 
-		return NULL;
-
-	if(!user_q) 
-		goto empty;
-
-	if(user_q->assoc_cond->user_list 
-	   && list_count(user_q->assoc_cond->user_list)) {
-		itr = list_iterator_create(user_q->assoc_cond->user_list);
-		if(list_count(user_q->assoc_cond->user_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(user_q->def_acct_list && list_count(user_q->def_acct_list)) {
-		itr = list_iterator_create(user_q->def_acct_list);
-		if(list_count(user_q->def_acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request,
-						   "DefaultProject",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-	
-/* 	if(user_q->qos != ACCT_QOS_NOTSET) { */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%u", */
-/* 			 user_q->qos-1); */
-/* 		gold_request_add_condition(gold_request, "Expedite", */
-/* 					   tmp_buff, */
-/* 					   GOLD_OPERATOR_NONE, 0);		 */
-/* 	} */
-
-empty:
-	gold_request_add_condition(gold_request, "Active",
-				   "True",
-				   GOLD_OPERATOR_NONE,
-				   0);
-
-	gold_request_add_condition(gold_request, "Special",
-				   "False",
-				   GOLD_OPERATOR_NONE,
-				   0);
-
-	gold_request_add_selection(gold_request, "Name");
-	gold_request_add_selection(gold_request, "DefaultProject");
-	gold_request_add_selection(gold_request, "Expedite");
-		
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("acct_storage_p_get_users: no response received");
-		return NULL;
-	}
-
-	user_list = _get_user_list_from_response(gold_response);
-	
-	destroy_gold_response(gold_response);
-
-	return user_list;
-}
-
-extern List acct_storage_p_get_accts(void *db_conn, uid_t uid,
-				     acct_account_cond_t *acct_q)
-{
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	List acct_list = NULL;
-	ListIterator itr = NULL;
-	int set = 0;
-	char *object = NULL;
-//	char tmp_buff[50];
-
-
-	gold_request = create_gold_request(GOLD_OBJECT_PROJECT,
-					   GOLD_ACTION_QUERY);
-	if(!gold_request) 
-		return NULL;
-
-	if(!acct_q) 
-		goto empty;
-
-	if(acct_q->assoc_cond->acct_list 
-	   && list_count(acct_q->assoc_cond->acct_list)) {
-		itr = list_iterator_create(acct_q->assoc_cond->acct_list);
-		if(list_count(acct_q->assoc_cond->acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(acct_q->description_list 
-	   && list_count(acct_q->description_list)) {
-		itr = list_iterator_create(acct_q->description_list);
-		if(list_count(acct_q->description_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Description",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(acct_q->organization_list 
-	   && list_count(acct_q->organization_list)) {
-		itr = list_iterator_create(acct_q->organization_list);
-		if(list_count(acct_q->organization_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Organization",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-/* 	if(acct_q->qos != ACCT_QOS_NOTSET) { */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%u", */
-/* 			 acct_q->qos-1); */
-/* 		gold_request_add_condition(gold_request, "Expedite", */
-/* 					   tmp_buff, */
-/* 					   GOLD_OPERATOR_NONE, 0);		 */
-/* 	} */
-empty:
-	gold_request_add_condition(gold_request, "Active",
-				   "True",
-				   GOLD_OPERATOR_NONE,
-				   0);
-
-	gold_request_add_condition(gold_request, "Special",
-				   "False",
-				   GOLD_OPERATOR_NONE,
-				   0);
-
-	gold_request_add_selection(gold_request, "Name");
-	gold_request_add_selection(gold_request, "Organization");
-	gold_request_add_selection(gold_request, "Description");
-	gold_request_add_selection(gold_request, "Expedite");
-		
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("acct_storage_p_get_accts: no response received");
-		return NULL;
-	}
-
-	acct_list = _get_acct_list_from_response(gold_response);
-	
-	destroy_gold_response(gold_response);
-
-	return acct_list;
-}
-
-extern List acct_storage_p_get_clusters(void *db_conn, uid_t uid,
-					acct_cluster_cond_t *cluster_q)
-{
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	List cluster_list = NULL;
-	ListIterator itr = NULL;
-	int set = 0;
-	char *object = NULL;
-
-
-	gold_request = create_gold_request(GOLD_OBJECT_MACHINE,
-					   GOLD_ACTION_QUERY);
-	if(!gold_request) 
-		return NULL;
-
-	if(!cluster_q) 
-		goto empty;
-
-	if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) {
-		itr = list_iterator_create(cluster_q->cluster_list);
-		if(list_count(cluster_q->cluster_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Name",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-empty:
-	gold_request_add_condition(gold_request, "Active",
-				   "True",
-				   GOLD_OPERATOR_NONE,
-				   0);
-
-	gold_request_add_condition(gold_request, "Special",
-				   "False",
-				   GOLD_OPERATOR_NONE,
-				   0);
-	
-	gold_request_add_selection(gold_request, "Name");
-		
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("acct_storage_p_get_clusters: no response received");
-		return NULL;
-	}
-
-	cluster_list = _get_cluster_list_from_response(gold_response);
-	
-	destroy_gold_response(gold_response);
-
-	return cluster_list;
-}
-
-extern List acct_storage_p_get_associations(void *db_conn, uid_t uid,
-					    acct_association_cond_t *assoc_q)
-{
-
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	List association_list = NULL;
-	ListIterator itr = NULL;
-	int set = 0;
-	char *object = NULL;
-
-	gold_request = create_gold_request(GOLD_OBJECT_ACCT,
-					   GOLD_ACTION_QUERY);
-	
-	if(!gold_request) 
-		return NULL;
-
-	if(!assoc_q) 
-		goto empty;
-	
-	if(assoc_q->id_list && list_count(assoc_q->id_list)) {
-		itr = list_iterator_create(assoc_q->id_list);
-		if(list_count(assoc_q->id_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Id",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->user_list && list_count(assoc_q->user_list)) {
-		itr = list_iterator_create(assoc_q->user_list);
-		if(list_count(assoc_q->user_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "User",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->acct_list && list_count(assoc_q->acct_list)) {
-		itr = list_iterator_create(assoc_q->acct_list);
-		if(list_count(assoc_q->acct_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Project",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) {
-		itr = list_iterator_create(assoc_q->cluster_list);
-		if(list_count(assoc_q->cluster_list) > 1)
-			set = 2;
-		else
-			set = 0;
-		
-		while((object = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Machine",
-						   object,
-						   GOLD_OPERATOR_NONE, set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-empty:
-	gold_request_add_selection(gold_request, "Id");
-	gold_request_add_selection(gold_request, "User");
-	gold_request_add_selection(gold_request, "Project");
-	gold_request_add_selection(gold_request, "Machine");
-	gold_request_add_selection(gold_request, "Parent");
-	gold_request_add_selection(gold_request, "FairShare");
-	gold_request_add_selection(gold_request, "MaxJobs");
-	gold_request_add_selection(gold_request, "MaxNodesPerJob");
-	gold_request_add_selection(gold_request, "MaxWallDurationPerJob");
-	gold_request_add_selection(gold_request, "MaxProcSecondsPerJob");
-		
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("acct_storage_p_get_associations: "
-		      "no response received");
-		return NULL;
-	}
-
-	association_list = _get_association_list_from_response(gold_response);
-
-	destroy_gold_response(gold_response);
-
-	return association_list;
-}
-
-extern List acct_storage_p_get_qos(void *db_conn, uid_t uid,
-				   acct_qos_cond_t *qos_cond)
-{
-	return NULL;
-}
-
-extern List acct_storage_p_get_txn(void *db_conn, uid_t uid,
-				   acct_txn_cond_t *txn_cond)
-{
-	return NULL;
-}
-
-extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
-				    acct_association_rec_t *acct_assoc,
-				    time_t start, time_t end)
-{
-	int rc = SLURM_ERROR;
-/* 	gold_request_t *gold_request = NULL; */
-/* 	gold_response_t *gold_response = NULL; */
-/* 	char tmp_buff[50]; */
-/* 	gold_object_t g_object; */
-/* 	char *req_cpu_type = NULL; */
-
-/* 	if(!acct_assoc || acct_assoc->id) { */
-/* 		error("acct_storage_p_get_usage: " */
-/* 		      "We need an id to go off to query off of"); */
-/* 		return rc; */
-/* 	} */
-
-/* 	switch(type) { */
-/* 	case ACCT_USAGE_HOUR: */
-/* 		g_object = GOLD_OBJECT_ACCT_HOUR_USAGE; */
-/* 		req_cpu_type = "AllocatedCPUSeconds"; */
-/* 		break; */
-/* 	case ACCT_USAGE_DAY: */
-/* 		g_object = GOLD_OBJECT_ACCT_DAY_USAGE; */
-/* 		req_cpu_type = "AllocatedCPUSeconds"; */
-/* 		break; */
-/* 	case ACCT_USAGE_MONTH: */
-/* 		g_object = GOLD_OBJECT_ACCT_MONTH_USAGE; */
-/* 		req_cpu_type = "AllocatedCPUHours"; */
-/* 		break; */
-/* 	default: */
-/* 		error("Unknown usage type"); */
-/* 		return rc; */
-/* 	} */
-/* 	gold_request = create_gold_request( */
-/* 		g_object, GOLD_ACTION_QUERY); */
-
-/* 	if(!gold_request)  */
-/* 		return rc; */
-
-/* 	snprintf(tmp_buff, sizeof(tmp_buff), "%u", acct_assoc->id); */
-/* 	gold_request_add_condition(gold_request, "Acct", tmp_buff, */
-/* 				   GOLD_OPERATOR_NONE, 0); */
-
-/* 	if(start) { */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)start); */
-/* 		gold_request_add_condition(gold_request, "PeriodStart", */
-/* 					   tmp_buff, */
-/* 					   GOLD_OPERATOR_GE, 0); */
-/* 	} */
-/* 	if(end) {	 */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%u", (int)end); */
-/* 		gold_request_add_condition(gold_request, "PeriodStart", */
-/* 					   tmp_buff, */
-/* 					   GOLD_OPERATOR_L, 0); */
-/* 	} */
-
-/* 	gold_request_add_selection(gold_request, "PeriodStart"); */
-/* 	gold_request_add_selection(gold_request, req_cpu_type); */
-
-/* 	gold_response = get_gold_response(gold_request);	 */
-/* 	destroy_gold_request(gold_request); */
-
-/* 	if(!gold_response) { */
-/* 		error("acct_storage_p_get_usage: " */
-/* 		      "no response received"); */
-/* 		return rc; */
-/* 	} */
-
-/* 	rc = _get_acct_accounting_list_from_response( */
-/* 		gold_response, acct_assoc); */
-
-/* 	destroy_gold_response(gold_response); */
-
-	return rc;
-}
-
-extern int acct_storage_p_roll_usage(void *db_conn, 
-				     time_t sent_start)
-{
-	int rc = SLURM_ERROR;
-	/* FIX ME: This doesn't do anything now */
-/* 	gold_request_t *gold_request = NULL; */
-/* 	gold_response_t *gold_response = NULL; */
-/* 	char tmp_buff[50]; */
-
-/* 	if(!acct_assoc || acct_assoc->id) { */
-/* 		error("acct_storage_p_roll_usage: " */
-/* 		      "We need an id to go off to query off of"); */
-/* 		return rc; */
-/* 	} */
-
-/* 	switch(type) { */
-/* 	case ACCT_USAGE_HOUR: */
-/* 		g_object = GOLD_OBJECT_ACCT_HOUR_USAGE; */
-/* 		req_cpu_type = "AllocatedCPUSecs"; */
-/* 		break; */
-/* 	case ACCT_USAGE_DAY: */
-/* 		g_object = GOLD_OBJECT_ACCT_DAY_USAGE; */
-/* 		req_cpu_type = "AllocatedCPUSecs"; */
-/* 		break; */
-/* 	case ACCT_USAGE_MONTH: */
-/* 		g_object = GOLD_OBJECT_ACCT_MONTH_USAGE; */
-/* 		req_cpu_type = "AllocatedCPUHours"; */
-/* 		break; */
-/* 	default: */
-/* 		error("Unknown usage type"); */
-/* 		return rc; */
-/* 	} */
-/* 	gold_request = create_gold_request( */
-/* 		GOLD_OBJECT_ACCT_DAY_USAGE, GOLD_ACTION_QUERY); */
-
-/* 	if(!gold_request)  */
-/* 		return rc; */
-
-/* 	snprintf(tmp_buff, sizeof(tmp_buff), "%u", acct_assoc->id); */
-/* 	gold_request_add_condition(gold_request, "Acct", tmp_buff, */
-/* 				   GOLD_OPERATOR_NONE, 0); */
-
-/* 	if(start) { */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)start); */
-/* 		gold_request_add_condition(gold_request, "PeriodStart", */
-/* 					   tmp_buff, */
-/* 					   GOLD_OPERATOR_GE, 0); */
-/* 	} */
-/* 	if(end) {	 */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%u", (int)end); */
-/* 		gold_request_add_condition(gold_request, "PeriodStart", */
-/* 					   tmp_buff, */
-/* 					   GOLD_OPERATOR_L, 0); */
-/* 	} */
-
-/* 	gold_request_add_selection(gold_request, "PeriodStart"); */
-/* 	gold_request_add_selection(gold_request, "AllocatedCPUSecs"); */
-
-/* 	gold_response = get_gold_response(gold_request);	 */
-/* 	destroy_gold_request(gold_request); */
-
-/* 	if(!gold_response) { */
-/* 		error("acct_storage_p_get_daily_usage: " */
-/* 		      "no response received"); */
-/* 		return rc; */
-/* 	} */
-
-/* 	rc = _get_acct_accounting_list_from_response( */
-/* 		gold_response, acct_assoc); */
-
-/* 	destroy_gold_response(gold_response); */
-
-	return rc;
-}
-
-extern int clusteracct_storage_p_node_down(void *db_conn,
-					   char *cluster,
-					   struct node_record *node_ptr,
-					   time_t event_time,
-					   char *reason)
-{
-	uint16_t cpus;
-	int rc = SLURM_ERROR;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char tmp_buff[50];
-	char *my_reason;
-
-	if (slurmctld_conf.fast_schedule && !slurmdbd_conf)
-		cpus = node_ptr->config_ptr->cpus;
-	else
-		cpus = node_ptr->cpus;
-
-	if (reason)
-		my_reason = reason;
-	else
-		my_reason = node_ptr->reason;
-
-#if _DEBUG
-	slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff));
-	info("cluster_acct_down: %s at %s with %u cpus due to %s", 
-	     node_ptr->name, tmp_buff, cpus, reason);
-#endif
-	/* If the node was already down, end that record since the
-	 * reason will most likely be different
-	 */
-
-	gold_request = create_gold_request(GOLD_OBJECT_EVENT,
-					   GOLD_ACTION_MODIFY);
-	if(!gold_request) 
-		return rc;
-	
-	gold_request_add_condition(gold_request, "Machine", cluster,
-				   GOLD_OPERATOR_NONE, 0);
-	gold_request_add_condition(gold_request, "EndTime", "0",
-				   GOLD_OPERATOR_NONE, 0);
-	gold_request_add_condition(gold_request, "Name", node_ptr->name,
-				   GOLD_OPERATOR_NONE, 0);
-
-	snprintf(tmp_buff, sizeof(tmp_buff), "%d", ((int)event_time - 1));
-	gold_request_add_assignment(gold_request, "EndTime", tmp_buff);		
-			
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("clusteracct_storage_p_node_down: no response received");
-		return rc;
-	}
-
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		destroy_gold_response(gold_response);
-		return rc;
-	}
-	destroy_gold_response(gold_response);
-
-	/* now add the new one */
-	gold_request = create_gold_request(GOLD_OBJECT_EVENT,
-					   GOLD_ACTION_CREATE);
-	if(!gold_request) 
-		return rc;
-	
-	gold_request_add_assignment(gold_request, "Machine", cluster);
-	snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)event_time);
-	gold_request_add_assignment(gold_request, "StartTime", tmp_buff);
-	gold_request_add_assignment(gold_request, "Name", node_ptr->name);
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u", cpus);
-	gold_request_add_assignment(gold_request, "CPUCount", tmp_buff);
-	gold_request_add_assignment(gold_request, "Reason", my_reason);
-			
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("clusteracct_p_node_down: no response received");
-		return rc;
-	}
-
-	if(!gold_response->rc) 
-		rc = SLURM_SUCCESS;
-	else {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-	}
-	destroy_gold_response(gold_response);
-
-	return rc;
-}
-
-extern int clusteracct_storage_p_node_up(void *db_conn,
-					 char *cluster,
-					 struct node_record *node_ptr,
-					 time_t event_time)
-{
-	int rc = SLURM_ERROR;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char tmp_buff[50];
-
-#if _DEBUG
-	slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff));
-	info("cluster_acct_up: %s at %s", node_ptr->name, tmp_buff);
-#endif
-
-	gold_request = create_gold_request(GOLD_OBJECT_EVENT,
-					   GOLD_ACTION_MODIFY);
-	if(!gold_request) 
-		return rc;
-	
-	gold_request_add_condition(gold_request, "Machine", cluster,
-				   GOLD_OPERATOR_NONE, 0);
-	gold_request_add_condition(gold_request, "EndTime", "0",
-				   GOLD_OPERATOR_NONE, 0);
-	gold_request_add_condition(gold_request, "Name", node_ptr->name,
-				   GOLD_OPERATOR_NONE, 0);
-
-	snprintf(tmp_buff, sizeof(tmp_buff), "%d", ((int)event_time - 1));
-	gold_request_add_assignment(gold_request, "EndTime", tmp_buff);		
-			
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("clusteracct_p_node_up: no response received");
-		return rc;
-	}
-
-	if(gold_response->rc) {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-		destroy_gold_response(gold_response);
-		return rc;
-	}
-	rc = SLURM_SUCCESS;
-	destroy_gold_response(gold_response);
-
-
-	return rc;
-}
-
-extern int clusteracct_storage_p_register_ctld(char *cluster,
-					       uint16_t port)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int clusteracct_storage_p_cluster_procs(void *db_conn,
-					       char *cluster,
-					       uint32_t procs,
-					       time_t event_time)
-{
-	static uint32_t last_procs = -1;
-	gold_request_t *gold_request = NULL;
-	gold_response_t *gold_response = NULL;
-	char tmp_buff[50];
-	int rc = SLURM_ERROR;
-	bool no_modify = 0;
-
-	if (procs == last_procs) {
-		debug3("we have the same procs as before no need to "
-		       "query the database.");
-		return SLURM_SUCCESS;
-	}
-	last_procs = procs;
-
-	/* Record the processor count */
-#if _DEBUG
-	slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff));
-	info("cluster_acct_procs: %s has %u total CPUs at %s", 
-	     cluster, procs, tmp_buff);
-#endif
-	
-	/* get the last known one */
-	gold_request = create_gold_request(GOLD_OBJECT_EVENT,
-					   GOLD_ACTION_QUERY);
-	if(!gold_request) 
-		return rc;
-	gold_request_add_condition(gold_request, "Machine", cluster,
-				   GOLD_OPERATOR_NONE, 0);
-	gold_request_add_condition(gold_request, "EndTime", "0",
-				   GOLD_OPERATOR_NONE, 0);
-	gold_request_add_condition(gold_request, "Name", "NULL",
-				   GOLD_OPERATOR_NONE, 0);
-
-	gold_request_add_selection(gold_request, "CPUCount");
-		
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("clusteracct_p_cluster_procs: no response received");
-		return rc;
-	}
-
-	if(gold_response->entry_cnt > 0) {
-		gold_response_entry_t *resp_entry = 
-			list_pop(gold_response->entries);
-		gold_name_value_t *name_val = list_pop(resp_entry->name_val);
-
-		if(procs == atoi(name_val->value)) {
-			debug("System hasn't changed since last entry");
-			destroy_gold_name_value(name_val);
-			destroy_gold_response_entry(resp_entry);
-			destroy_gold_response(gold_response);
-			return SLURM_SUCCESS;
-		} else {
-			debug("System has changed from %s cpus to %d",
-			      name_val->value, procs);   
-		}
-
-		destroy_gold_name_value(name_val);
-		destroy_gold_response_entry(resp_entry);
-	} else {
-		debug("We don't have an entry for this machine "
-		      "most likely a first time running.");
-		no_modify = 1;
-	}
-
-	destroy_gold_response(gold_response);
-	
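-	/* Close out the currently open event record (EndTime == 0)
-	 * before the record with the new CPUCount is added below. */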
-	if(!no_modify) {
-		gold_request = create_gold_request(GOLD_OBJECT_EVENT,
-						   GOLD_ACTION_MODIFY);
-		if(!gold_request) 
-			return rc;
-		
-		gold_request_add_condition(gold_request, "Machine",
-					   cluster,
-					   GOLD_OPERATOR_NONE, 0);
-		gold_request_add_condition(gold_request, "EndTime", "0",
-					   GOLD_OPERATOR_NONE, 0);
-		gold_request_add_condition(gold_request, "Name", "NULL",
-					   GOLD_OPERATOR_NONE, 0);
-		
-		snprintf(tmp_buff, sizeof(tmp_buff), "%d", 
-			 ((int)event_time - 1));
-		gold_request_add_assignment(gold_request, "EndTime", tmp_buff);	
-		
-		gold_response = get_gold_response(gold_request);	
-		destroy_gold_request(gold_request);
-		
-		if(!gold_response) {
-			error("jobacct_p_cluster_procs: no response received");
-			return rc;
-		}
-		
-		if(gold_response->rc) {
-			error("gold_response has non-zero rc(%d): %s",
-			      gold_response->rc,
-			      gold_response->message);
-			errno = gold_response->rc;
-			destroy_gold_response(gold_response);
-			return rc;
-		}
-		destroy_gold_response(gold_response);
-	}
-
-	/* now add the new one */
-	gold_request = create_gold_request(GOLD_OBJECT_EVENT,
-					   GOLD_ACTION_CREATE);
-	if(!gold_request) 
-		return rc;
-	
-	gold_request_add_assignment(gold_request, "Machine", cluster);
-	snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)event_time);
-	gold_request_add_assignment(gold_request, "StartTime", tmp_buff);
-	snprintf(tmp_buff, sizeof(tmp_buff), "%u", procs);
-	gold_request_add_assignment(gold_request, "CPUCount", tmp_buff);
-			
-	gold_response = get_gold_response(gold_request);	
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("clusteracct_p_cluster_procs: no response received");
-		return rc;
-	}
-
-	if(!gold_response->rc) 
-		rc = SLURM_SUCCESS;
-	else {
-		error("gold_response has non-zero rc(%d): %s",
-		      gold_response->rc,
-		      gold_response->message);
-		errno = gold_response->rc;
-	}
-	destroy_gold_response(gold_response);
-
-	return rc;
-}
-
-extern int clusteracct_storage_p_get_usage(
-	void *db_conn, uid_t uid, 
-	acct_cluster_rec_t *cluster_rec, time_t start, 
-	time_t end)
-{
-	int rc = SLURM_ERROR;
-/* 	gold_request_t *gold_request = NULL; */
-/* 	gold_response_t *gold_response = NULL; */
-/* 	char tmp_buff[50]; */
-/* 	gold_object_t g_object; */
-/* 	char *alloc_cpu = NULL; */
-/* 	char *idle_cpu = NULL; */
-/* 	char *down_cpu = NULL; */
-/* 	char *resv_cpu = NULL; */
-
-/* 	if(!cluster_rec || !cluster_rec->name) { */
-/* 		error("clusteracct_storage_p_get_hourly_usage:" */
-/* 		      "no cluster name given to query."); */
-/* 		return rc; */
-/* 	} */
-/* 	switch(type) { */
-/* 	case ACCT_USAGE_HOUR: */
-/* 		g_object = GOLD_OBJECT_MACHINE_HOUR_USAGE; */
-/* 		alloc_cpu = "AllocatedCPUSeconds"; */
-/* 		idle_cpu = "IdleCPUSeconds"; */
-/* 		down_cpu = "DownCPUSeconds"; */
-/* 		resv_cpu = "ReservedCPUSeconds"; */
-/* 		break; */
-/* 	case ACCT_USAGE_DAY: */
-/* 		g_object = GOLD_OBJECT_MACHINE_DAY_USAGE; */
-/* 		alloc_cpu = "AllocatedCPUSeconds"; */
-/* 		idle_cpu = "IdleCPUSeconds"; */
-/* 		down_cpu = "DownCPUSeconds"; */
-/* 		resv_cpu = "ReservedCPUSeconds"; */
-/* 		break; */
-/* 	case ACCT_USAGE_MONTH: */
-/* 		g_object = GOLD_OBJECT_MACHINE_MONTH_USAGE; */
-/* 		alloc_cpu = "AllocatedCPUHours"; */
-/* 		idle_cpu = "IdleCPUHours"; */
-/* 		down_cpu = "DownCPUHours"; */
-/* 		resv_cpu = "ReservedCPUHours"; */
-/* 		break; */
-/* 	default: */
-/* 		error("Unknown usage type"); */
-/* 		return rc; */
-/* 	} */
-/* 	/\* get the last known one *\/ */
-/* 	gold_request = create_gold_request(GOLD_OBJECT_MACHINE_HOUR_USAGE, */
-/* 					   GOLD_ACTION_QUERY); */
-/* 	if(!gold_request)  */
-/* 		return rc; */
-
-/* 	gold_request_add_condition(gold_request, "Machine", cluster_rec->name, */
-/* 				   GOLD_OPERATOR_NONE, 0); */
-/* 	if(start) { */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)start); */
-/* 		gold_request_add_condition(gold_request, "PeriodStart", */
-/* 					   tmp_buff, */
-/* 					   GOLD_OPERATOR_GE, 0); */
-/* 	} */
-/* 	if(end) {	 */
-/* 		snprintf(tmp_buff, sizeof(tmp_buff), "%u", (int)end); */
-/* 		gold_request_add_condition(gold_request, "PeriodStart", */
-/* 					   tmp_buff, */
-/* 					   GOLD_OPERATOR_L, 0); */
-/* 	} */
-
-/* 	gold_request_add_selection(gold_request, "CPUCount"); */
-/* 	gold_request_add_selection(gold_request, "PeriodStart"); */
-/* 	gold_request_add_selection(gold_request, idle_cpu); */
-/* 	gold_request_add_selection(gold_request, down_cpu); */
-/* 	gold_request_add_selection(gold_request, alloc_cpu); */
-/* 	gold_request_add_selection(gold_request, resv_cpu); */
-		
-/* 	gold_response = get_gold_response(gold_request);	 */
-/* 	destroy_gold_request(gold_request); */
-
-/* 	if(!gold_response) { */
-/* 		error("clusteracct_p_get_hourly_usage: no response received"); */
-/* 		return rc; */
-/* 	} */
-
-/* 	if(gold_response->entry_cnt > 0) { */
-/* 		rc = _get_cluster_accounting_list_from_response( */
-/* 			gold_response, cluster_rec); */
-/* 	} else { */
-/* 		debug("We don't have an entry for this machine for this time"); */
-/* 	} */
-/* 	destroy_gold_response(gold_response); */
-
-	return rc;
-}
-
-extern int jobacct_storage_p_job_start(void *db_conn,
-				       struct job_record *job_ptr)
-{
-	gold_object_t action = GOLD_ACTION_CREATE;
-	
-	if(_check_for_job(job_ptr->job_id, job_ptr->details->submit_time)) {
-		debug3("It looks like this job is already in GOLD.");
-		action = GOLD_ACTION_MODIFY;
-	}
-
-	return _add_edit_job(job_ptr, action);
-}
-
-extern int jobacct_storage_p_job_complete(void *db_conn,
-					  struct job_record *job_ptr) 
-{
-	gold_object_t action = GOLD_ACTION_MODIFY;
-	
-	if(!_check_for_job(job_ptr->job_id, job_ptr->details->submit_time)) {
-		error("Couldn't find this job entry.  "
-		      "This shouldn't happen, we are going to create one.");
-		action = GOLD_ACTION_CREATE;
-	}
-
-	return _add_edit_job(job_ptr, action);
-}
-
-extern int jobacct_storage_p_step_start(void *db_conn,
-					struct step_record *step)
-{
-	gold_object_t action = GOLD_ACTION_MODIFY;
-	
-	if(!_check_for_job(step->job_ptr->job_id,
-			   step->job_ptr->details->submit_time)) {
-		error("Couldn't find this job entry.  "
-		      "This shouldn't happen, we are going to create one.");
-		action = GOLD_ACTION_CREATE;
-	}
-
-	return _add_edit_job(step->job_ptr, action);
-
-}
-
-extern int jobacct_storage_p_step_complete(void *db_conn,
-					   struct step_record *step)
-{
-	return SLURM_SUCCESS;	
-}
-
-extern int jobacct_storage_p_suspend(void *db_conn,
-				     struct job_record *job_ptr)
-{
-	return SLURM_SUCCESS;
-}
-
-/* 
- * get info from the storage 
- * returns List of jobacct_job_rec_t *
- * note List needs to be freed when called
- */
-extern List jobacct_storage_p_get_jobs(void *db_conn, uid_t uid,
-				       List selected_steps,
-				       List selected_parts,
-				       sacct_parameters_t *params)
-{
-	gold_request_t *gold_request = create_gold_request(GOLD_OBJECT_JOB,
-							   GOLD_ACTION_QUERY);
-	gold_response_t *gold_response = NULL;
-	gold_response_entry_t *resp_entry = NULL;
-	gold_name_value_t *name_val = NULL;
-	char tmp_buff[50];
-	int set = 0;
-	char *selected_part = NULL;
-	jobacct_selected_step_t *selected_step = NULL;
-	jobacct_job_rec_t *job = NULL;
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	List job_list = NULL;
-
-	if(!gold_request) 
-		return NULL;
-
-
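-	/* Build the Job query: restrict it to the requested job ids
-	 * and partitions, then select the fields needed to fill in a
-	 * jobacct_job_rec_t for each matching entry. */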
-	if(selected_steps && list_count(selected_steps)) {
-		itr = list_iterator_create(selected_steps);
-		if(list_count(selected_steps) > 1)
-			set = 2;
-		else
-			set = 0;
-		while((selected_step = list_next(itr))) {
-			snprintf(tmp_buff, sizeof(tmp_buff), "%u", 
-				 selected_step->jobid);
-			gold_request_add_condition(gold_request, "JobId",
-						   tmp_buff,
-						   GOLD_OPERATOR_NONE,
-						   set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	if(selected_parts && list_count(selected_parts)) {
-		if(list_count(selected_parts) > 1)
-			set = 2;
-		else
-			set = 0;
-		itr = list_iterator_create(selected_parts);
-		while((selected_part = list_next(itr))) {
-			gold_request_add_condition(gold_request, "Partition",
-						   selected_part,
-						   GOLD_OPERATOR_NONE,
-						   set);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-	}
-
-	gold_request_add_selection(gold_request, "JobId");
-	gold_request_add_selection(gold_request, "GoldAccountId");
-	gold_request_add_selection(gold_request, "Partition");
-	gold_request_add_selection(gold_request, "RequestedCPUCount");
-	gold_request_add_selection(gold_request, "AllocatedCPUCount");
-	gold_request_add_selection(gold_request, "NodeList");
-	gold_request_add_selection(gold_request, "JobName");
-	gold_request_add_selection(gold_request, "SubmitTime");
-	gold_request_add_selection(gold_request, "EligibleTime");
-	gold_request_add_selection(gold_request, "StartTime");
-	gold_request_add_selection(gold_request, "EndTime");
-	gold_request_add_selection(gold_request, "Suspended");
-	gold_request_add_selection(gold_request, "State");
-	gold_request_add_selection(gold_request, "ExitCode");
-	gold_request_add_selection(gold_request, "QoS");
-
-	gold_response = get_gold_response(gold_request);
-	destroy_gold_request(gold_request);
-
-	if(!gold_response) {
-		error("jobacct_storage_p_get_jobs: no response received");
-		return NULL;
-	}
-	
-	job_list = list_create(destroy_jobacct_job_rec);
-	if(gold_response->entry_cnt > 0) {
-		itr = list_iterator_create(gold_response->entries);
-		while((resp_entry = list_next(itr))) {
-			job = create_jobacct_job_rec();
-			itr2 = list_iterator_create(resp_entry->name_val);
-			while((name_val = list_next(itr2))) {
-				if(!strcmp(name_val->name, "JobId")) {
-					job->jobid = atoi(name_val->value);
-				} else if(!strcmp(name_val->name, 
-						  "GoldAccountId")) {
-					acct_association_rec_t account_rec;
-					memset(&account_rec, 0,
-					       sizeof(acct_association_rec_t));
-					account_rec.id = atoi(name_val->value);
-					/* FIX ME: We need to get the
-					 * parts of the association from
-					 * gold here
-					 */
-	/* 				if(acct_storage_p_get_assoc_id( */
-/* 						   db_conn, */
-/* 						   &account_rec) == SLURM_ERROR) */
-/* 						error("no assoc found for " */
-/* 						      "id %u", */
-/* 						      account_rec.id); */
-					
-/* 					if(account_rec.cluster) { */
-/* 						if(params->opt_cluster && */
-/* 						   strcmp(params->opt_cluster, */
-/* 							  account_rec. */
-/* 							  cluster)) { */
-/* 							destroy_jobacct_job_rec( */
-/* 								job); */
-/* 							job = NULL; */
-/* 							break; */
-/* 						} */
-/* 						job->cluster = */
-/* 							xstrdup(account_rec. */
-/* 								cluster); */
-/* 					} */
-
-					if(account_rec.user) {
-						struct passwd pwd, *result;
-						size_t bufsize;
-						char *buffer;
-						int rc;
-						bufsize = sysconf(
-							_SC_GETPW_R_SIZE_MAX);
-						buffer = xmalloc(bufsize);
-						rc = getpwnam_r(account_rec.
-								user,
-								&pwd, buffer,
-								bufsize, 
-								&result);
-						if (rc != 0)
-							result = NULL;
-						job->user = xstrdup(account_rec.
-								    user);
-						if(result) {
-							job->uid =
-								result->
-								pw_uid;
-							job->gid = 
-								result->
-								pw_gid;
-						}
-						xfree(buffer);
-					}
-					if(account_rec.acct) 
-						job->account =
-							xstrdup(account_rec.
-								acct);
-				} else if(!strcmp(name_val->name,
-						  "Partition")) {
-					job->partition =
-						xstrdup(name_val->value);
-				} else if(!strcmp(name_val->name,
-						  "RequestedCPUCount")) {
-					job->req_cpus = atoi(name_val->value);
-				} else if(!strcmp(name_val->name,
-						  "AllocatedCPUCount")) {
-					job->alloc_cpus = atoi(name_val->value);
-				} else if(!strcmp(name_val->name, "NodeList")) {
-					job->nodes = xstrdup(name_val->value);
-				} else if(!strcmp(name_val->name, "JobName")) {
-					job->jobname = xstrdup(name_val->value);
-				} else if(!strcmp(name_val->name,
-						  "SubmitTime")) {
-					job->submit = atoi(name_val->value);
-				} else if(!strcmp(name_val->name,
-						  "EligibleTime")) {
-					job->eligible = atoi(name_val->value);
-				} else if(!strcmp(name_val->name,
-						  "StartTime")) {
-					job->start = atoi(name_val->value);
-				} else if(!strcmp(name_val->name, "EndTime")) {
-					job->end = atoi(name_val->value);
-				} else if(!strcmp(name_val->name,
-						  "Suspended")) {
-					job->suspended = atoi(name_val->value);
-				} else if(!strcmp(name_val->name, "State")) {
-					job->state = atoi(name_val->value);
-				} else if(!strcmp(name_val->name, "ExitCode")) {
-					job->exitcode = atoi(name_val->value);
-				} /* else if(!strcmp(name_val->name, "QoS")) { */
-/* 					job->qos = atoi(name_val->value); */
-/* 				} */
-			}
-			list_iterator_destroy(itr2);
-
-			if(!job) 
-				continue;
-
-			job->show_full = 1;
-			job->track_steps = 0;
-			job->priority = 0;
-
-			if (!job->nodes) 
-				job->nodes = xstrdup("(unknown)");
-			
-			list_append(job_list, job);
-		}
-		list_iterator_destroy(itr);		
-	}
-	destroy_gold_response(gold_response);
-	
-	return job_list;
-}
-
-/* 
- * get info from the storage 
- * returns List of jobacct_job_rec_t *
- * note: the returned List must be freed by the caller
- */
-extern List jobacct_storage_p_get_jobs_cond(void *db_conn, uid_t uid,
-					    void *job_cond)
-{
-	info("not implemented");
-	return NULL;
-}
-
-/* 
- * expire old info from the storage 
- */
-extern void jobacct_storage_p_archive(void *db_conn,
-				      List selected_parts,
-				      void *params)
-{
-	info("not implemented");
-	
-	return;
-}
-
-extern int acct_storage_p_update_shares_used(void *db_conn,
-					     List shares_used)
-{
-	return SLURM_SUCCESS;
-}
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index b3f3a6693..6f0616557 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -91,10 +91,14 @@ const uint32_t plugin_version = 100;
 
 static mysql_db_info_t *mysql_db_info = NULL;
 static char *mysql_db_name = NULL;
+static time_t global_last_rollup = 0;
+static pthread_mutex_t rollup_lock = PTHREAD_MUTEX_INITIALIZER;
 
 #define DEFAULT_ACCT_DB "slurm_acct_db"
 #define DELETE_SEC_BACK 86400
 
+
+
 char *acct_coord_table = "acct_coord_table";
 char *acct_table = "acct_table";
 char *assoc_day_table = "assoc_day_usage_table";
@@ -114,6 +118,13 @@ char *user_table = "user_table";
 char *last_ran_table = "last_ran_table";
 char *suspend_table = "suspend_table";
 
+
+typedef enum {
+	QOS_LEVEL_NONE,
+	QOS_LEVEL_SET,
+	QOS_LEVEL_MODIFY
+} qos_level_t;
+
 static int normal_qos_id = NO_VAL;
 
 extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit);
@@ -134,6 +145,41 @@ extern int clusteracct_storage_p_get_usage(
 	mysql_conn_t *mysql_conn, uid_t uid,
 	acct_cluster_rec_t *cluster_rec, time_t start, time_t end);
 
+extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid, 
+					List acct_list,
+					acct_user_cond_t *user_cond);
+
+
+/* escape a string for use inside a double-quoted SQL literal by
+ * prefixing every '"' in it with a backslash */
+static char *_fix_double_quotes(char *str)
+{
+	int i=0, start=0;
+	char *fixed = NULL;
+
+	if(!str)
+		return NULL;
+	
+	while(str[i]) {
+		if(str[i] == '"') {
+			char *tmp = xstrndup(str+start, i-start);
+			xstrfmtcat(fixed, "%s\\\"", tmp);
+			xfree(tmp);
+			start = i + 1;
+		} 
+		
+		i++;
+	}
+	
+	if((i-start) > 0) {
+		char *tmp = xstrndup(str+start, i-start);
+		xstrcat(fixed, tmp);
+		xfree(tmp);
+	}
+
+	return fixed;
+}
+
 /* This should be added to the beginning of each function to make sure
  * we have a connection to the database before we try to use it.
  */
@@ -154,6 +200,738 @@ static int _check_connection(mysql_conn_t *mysql_conn)
 	return SLURM_SUCCESS;
 }
 
+static int _setup_association_limits(acct_association_rec_t *assoc,
+				     char **cols, char **vals,
+				     char **extra, qos_level_t qos_level,
+				     bool get_fs)
+{	
+	if(!assoc)
+		return SLURM_ERROR;
+	
+	if((int)assoc->fairshare >= 0) {
+		xstrcat(*cols, ", fairshare");
+		xstrfmtcat(*vals, ", %u", assoc->fairshare);
+		xstrfmtcat(*extra, ", fairshare=%u", assoc->fairshare);
+	} else if (((int)assoc->fairshare == INFINITE) || get_fs) {
+		xstrcat(*cols, ", fairshare");
+		xstrcat(*vals, ", 1");
+		xstrcat(*extra, ", fairshare=1");		
+	} 
+
+	if((int)assoc->grp_cpu_mins >= 0) {
+		xstrcat(*cols, ", grp_cpu_mins");
+		xstrfmtcat(*vals, ", %llu", assoc->grp_cpu_mins);
+		xstrfmtcat(*extra, ", grp_cpu_mins=%llu",
+			   assoc->grp_cpu_mins);
+	} else if((int)assoc->grp_cpu_mins == INFINITE) {
+		xstrcat(*cols, ", grp_cpu_mins");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_cpu_mins=NULL");
+	}
+		
+	if((int)assoc->grp_cpus >= 0) {
+		xstrcat(*cols, ", grp_cpus");
+		xstrfmtcat(*vals, ", %u", assoc->grp_cpus);
+		xstrfmtcat(*extra, ", grp_cpus=%u", assoc->grp_cpus);
+	} else if((int)assoc->grp_cpus == INFINITE) {
+		xstrcat(*cols, ", grp_cpus");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_cpus=NULL");
+	}
+
+	if((int)assoc->grp_jobs >= 0) {
+		xstrcat(*cols, ", grp_jobs");
+		xstrfmtcat(*vals, ", %u", assoc->grp_jobs);
+		xstrfmtcat(*extra, ", grp_jobs=%u", assoc->grp_jobs);
+	} else if((int)assoc->grp_jobs == INFINITE) {
+		xstrcat(*cols, ", grp_jobs");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_jobs=NULL");
+	}
+
+	if((int)assoc->grp_nodes >= 0) {
+		xstrcat(*cols, ", grp_nodes");
+		xstrfmtcat(*vals, ", %u", assoc->grp_nodes);
+		xstrfmtcat(*extra, ", grp_nodes=%u", assoc->grp_nodes);
+	} else if((int)assoc->grp_nodes == INFINITE) {
+		xstrcat(*cols, ", grp_nodes");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_nodes=NULL");
+	}
+
+	if((int)assoc->grp_submit_jobs >= 0) {
+		xstrcat(*cols, ", grp_submit_jobs");
+		xstrfmtcat(*vals, ", %u",
+			   assoc->grp_submit_jobs);
+		xstrfmtcat(*extra, ", grp_submit_jobs=%u",
+			   assoc->grp_submit_jobs);
+	} else if((int)assoc->grp_submit_jobs == INFINITE) {
+		xstrcat(*cols, ", grp_submit_jobs");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_submit_jobs=NULL");
+	}
+
+	if((int)assoc->grp_wall >= 0) {
+		xstrcat(*cols, ", grp_wall");
+		xstrfmtcat(*vals, ", %u", assoc->grp_wall);
+		xstrfmtcat(*extra, ", grp_wall=%u",
+			   assoc->grp_wall);
+	} else if((int)assoc->grp_wall == INFINITE) {
+		xstrcat(*cols, ", grp_wall");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_wall=NULL");
+	}
+
+	if((int)assoc->max_cpu_mins_pj >= 0) {
+		xstrcat(*cols, ", max_cpu_mins_per_job");
+		xstrfmtcat(*vals, ", %llu", assoc->max_cpu_mins_pj);
+		xstrfmtcat(*extra, ", max_cpu_mins_per_job=%llu",
+			   assoc->max_cpu_mins_pj);
+	} else if((int)assoc->max_cpu_mins_pj == INFINITE) {
+		xstrcat(*cols, ", max_cpu_mins_per_job");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_cpu_mins_per_job=NULL");
+	}
+
+	if((int)assoc->max_cpus_pj >= 0) {
+		xstrcat(*cols, ", max_cpus_per_job");
+		xstrfmtcat(*vals, ", %u", assoc->max_cpus_pj);
+		xstrfmtcat(*extra, ", max_cpus_per_job=%u",
+			   assoc->max_cpus_pj);
+	} else if((int)assoc->max_cpus_pj == INFINITE) {
+		xstrcat(*cols, ", max_cpus_per_job");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_cpus_per_job=NULL");
+	}
+		
+	if((int)assoc->max_jobs >= 0) {
+		xstrcat(*cols, ", max_jobs");
+		xstrfmtcat(*vals, ", %u", assoc->max_jobs);
+		xstrfmtcat(*extra, ", max_jobs=%u",
+			   assoc->max_jobs);
+	} else if((int)assoc->max_jobs == INFINITE) {
+		xstrcat(*cols, ", max_jobs");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_jobs=NULL");		
+	}
+
+	if((int)assoc->max_nodes_pj >= 0) {
+		xstrcat(*cols, ", max_nodes_per_job");
+		xstrfmtcat(*vals, ", %u", assoc->max_nodes_pj);
+		xstrfmtcat(*extra, ", max_nodes_per_job=%u",
+			   assoc->max_nodes_pj);
+	} else if((int)assoc->max_nodes_pj == INFINITE) {
+		xstrcat(*cols, ", max_nodes_per_job");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_nodes_per_job=NULL");
+	}
+
+	if((int)assoc->max_submit_jobs >= 0) {
+		xstrcat(*cols, ", max_submit_jobs");
+		xstrfmtcat(*vals, ", %u", assoc->max_submit_jobs);
+		xstrfmtcat(*extra, ", max_submit_jobs=%u",
+			   assoc->max_submit_jobs);
+	} else if((int)assoc->max_submit_jobs == INFINITE) {
+		xstrcat(*cols, ", max_submit_jobs");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_submit_jobs=NULL");
+	}
+
+	if((int)assoc->max_wall_pj >= 0) {
+		xstrcat(*cols, ", max_wall_duration_per_job");
+		xstrfmtcat(*vals, ", %u", assoc->max_wall_pj);
+		xstrfmtcat(*extra, ", max_wall_duration_per_job=%u",
+			   assoc->max_wall_pj);
+	} else if((int)assoc->max_wall_pj == INFINITE) {
+		xstrcat(*cols, ", max_wall_duration_per_job");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_wall_duration_per_job=NULL");
+	}
+
+	if((qos_level != QOS_LEVEL_MODIFY)
+	   && assoc->qos_list && list_count(assoc->qos_list)) {
+		char *qos_type = "qos";
+		char *qos_val = NULL;
+		char *tmp_char = NULL;
+		int set = 0;
+		ListIterator qos_itr = 
+			list_iterator_create(assoc->qos_list);
+		
+		while((tmp_char = list_next(qos_itr))) {
+			if(!set) {
+				if(tmp_char[0] == '+' || tmp_char[0] == '-')
+					qos_type = "delta_qos";
+				set = 1;
+			}
+			xstrfmtcat(qos_val, ",%s", tmp_char);
+		}
+
+		list_iterator_destroy(qos_itr);
+
+		xstrfmtcat(*cols, ", %s", qos_type);
+		
+		
+		xstrfmtcat(*vals, ", '%s'", qos_val); 		
+		xstrfmtcat(*extra, ", %s='%s'", qos_type, qos_val); 
+		xfree(qos_val);
+	} else if((qos_level == QOS_LEVEL_SET) && (normal_qos_id != NO_VAL)) { 
+		/* Add normal qos to the account */
+		xstrcat(*cols, ", qos");
+		xstrfmtcat(*vals, ", ',%d'", normal_qos_id);
+		xstrfmtcat(*extra, ", qos=',%d'", normal_qos_id);
+	}
+
+	return SLURM_SUCCESS;
+
+}
+
+static int _setup_qos_limits(acct_qos_rec_t *qos,
+			     char **cols, char **vals,
+			     char **extra, qos_level_t qos_level)
+{	
+	if(!qos)
+		return SLURM_ERROR;
+	
+	if(qos->description) {
+		xstrcat(*cols, ", description");
+		xstrfmtcat(*vals, ", \"%s\"", qos->description);
+		xstrfmtcat(*extra, ", description=\"%s\"",
+			   qos->description);
+
+	}
+	if((int)qos->priority >= 0) {
+		xstrcat(*cols, ", priority");
+		xstrfmtcat(*vals, ", %d", qos->priority);
+		xstrfmtcat(*extra, ", priority=%d", qos->priority);
+	} else if ((int)qos->priority == INFINITE) {
+		xstrcat(*cols, ", priority");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", priority=NULL");		
+	} 
+
+	if((int)qos->grp_cpu_mins >= 0) {
+		xstrcat(*cols, ", grp_cpu_mins");
+		xstrfmtcat(*vals, ", %llu", qos->grp_cpu_mins);
+		xstrfmtcat(*extra, ", grp_cpu_mins=%llu",
+			   qos->grp_cpu_mins);
+	} else if((int)qos->grp_cpu_mins == INFINITE) {
+		xstrcat(*cols, ", grp_cpu_mins");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_cpu_mins=NULL");
+	}
+		
+	if((int)qos->grp_cpus >= 0) {
+		xstrcat(*cols, ", grp_cpus");
+		xstrfmtcat(*vals, ", %u", qos->grp_cpus);
+		xstrfmtcat(*extra, ", grp_cpus=%u", qos->grp_cpus);
+	} else if((int)qos->grp_cpus == INFINITE) {
+		xstrcat(*cols, ", grp_cpus");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_cpus=NULL");
+	}
+
+	if((int)qos->grp_jobs >= 0) {
+		xstrcat(*cols, ", grp_jobs");
+		xstrfmtcat(*vals, ", %u", qos->grp_jobs);
+		xstrfmtcat(*extra, ", grp_jobs=%u", qos->grp_jobs);
+	} else if((int)qos->grp_jobs == INFINITE) {
+		xstrcat(*cols, ", grp_jobs");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_jobs=NULL");
+	}
+
+	if((int)qos->grp_nodes >= 0) {
+		xstrcat(*cols, ", grp_nodes");
+		xstrfmtcat(*vals, ", %u", qos->grp_nodes);
+		xstrfmtcat(*extra, ", grp_nodes=%u", qos->grp_nodes);
+	} else if((int)qos->grp_nodes == INFINITE) {
+		xstrcat(*cols, ", grp_nodes");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_nodes=NULL");
+	}
+
+	if((int)qos->grp_submit_jobs >= 0) {
+		xstrcat(*cols, ", grp_submit_jobs");
+		xstrfmtcat(*vals, ", %u",
+			   qos->grp_submit_jobs);
+		xstrfmtcat(*extra, ", grp_submit_jobs=%u",
+			   qos->grp_submit_jobs);
+	} else if((int)qos->grp_submit_jobs == INFINITE) {
+		xstrcat(*cols, ", grp_submit_jobs");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_submit_jobs=NULL");
+	}
+
+	if((int)qos->grp_wall >= 0) {
+		xstrcat(*cols, ", grp_wall");
+		xstrfmtcat(*vals, ", %u", qos->grp_wall);
+		xstrfmtcat(*extra, ", grp_wall=%u",
+			   qos->grp_wall);
+	} else if((int)qos->grp_wall == INFINITE) {
+		xstrcat(*cols, ", grp_wall");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grp_wall=NULL");
+	}
+
+	if((int)qos->max_cpu_mins_pu >= 0) {
+		xstrcat(*cols, ", max_cpu_mins_per_user");
+		xstrfmtcat(*vals, ", %llu", qos->max_cpu_mins_pu);
+		xstrfmtcat(*extra, ", max_cpu_mins_per_user=%llu",
+			   qos->max_cpu_mins_pu);
+	} else if((int)qos->max_cpu_mins_pu == INFINITE) {
+		xstrcat(*cols, ", max_cpu_mins_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_cpu_mins_per_user=NULL");
+	}
+
+	if((int)qos->max_cpus_pu >= 0) {
+		xstrcat(*cols, ", max_cpus_per_user");
+		xstrfmtcat(*vals, ", %u", qos->max_cpus_pu);
+		xstrfmtcat(*extra, ", max_cpus_per_user=%u",
+			   qos->max_cpus_pu);
+	} else if((int)qos->max_cpus_pu == INFINITE) {
+		xstrcat(*cols, ", max_cpus_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_cpus_per_user=NULL");
+	}
+		
+	if((int)qos->max_jobs_pu >= 0) {
+		xstrcat(*cols, ", max_jobs_per_user");
+		xstrfmtcat(*vals, ", %u", qos->max_jobs_pu);
+		xstrfmtcat(*extra, ", max_jobs_per_user=%u",
+			   qos->max_jobs_pu);
+	} else if((int)qos->max_jobs_pu == INFINITE) {
+		xstrcat(*cols, ", max_jobs_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_jobs_per_user=NULL");		
+	}
+
+	if((int)qos->max_nodes_pu >= 0) {
+		xstrcat(*cols, ", max_nodes_per_user");
+		xstrfmtcat(*vals, ", %u", qos->max_nodes_pu);
+		xstrfmtcat(*extra, ", max_nodes_per_user=%u",
+			   qos->max_nodes_pu);
+	} else if((int)qos->max_nodes_pu == INFINITE) {
+		xstrcat(*cols, ", max_nodes_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_nodes_per_user=NULL");
+	}
+
+	if((int)qos->max_submit_jobs_pu >= 0) {
+		xstrcat(*cols, ", max_submit_jobs_per_user");
+		xstrfmtcat(*vals, ", %u", qos->max_submit_jobs_pu);
+		xstrfmtcat(*extra, ", max_submit_jobs_per_user=%u",
+			   qos->max_submit_jobs_pu);
+	} else if((int)qos->max_submit_jobs_pu == INFINITE) {
+		xstrcat(*cols, ", max_submit_jobs_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_submit_jobs_per_user=NULL");
+	}
+
+	if((int)qos->max_wall_pu >= 0) {
+		xstrcat(*cols, ", max_wall_duration_per_user");
+		xstrfmtcat(*vals, ", %u", qos->max_wall_pu);
+		xstrfmtcat(*extra, ", max_wall_duration_per_user=%u",
+			   qos->max_wall_pu);
+	} else if((int)qos->max_wall_pu == INFINITE) {
+		xstrcat(*cols, ", max_wall_duration_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_wall_duration_per_user=NULL");
+	}
+
+	if((qos_level != QOS_LEVEL_MODIFY)
+	   && qos->preemptee_list && list_count(qos->preemptee_list)) {
+		char *qos_val = NULL;
+		char *tmp_char = NULL;
+		ListIterator qos_itr = 
+			list_iterator_create(qos->preemptee_list);
+		
+		xstrcat(*cols, ", preemptees");
+		
+		while((tmp_char = list_next(qos_itr))) 
+			xstrfmtcat(qos_val, ",%s", tmp_char);
+		
+		list_iterator_destroy(qos_itr);
+		
+		xstrfmtcat(*vals, ", \"%s\"", qos_val); 		
+		xstrfmtcat(*extra, ", preemptees=\"%s\"", qos_val); 
+		xfree(qos_val);
+	} 
+
+	if((qos_level != QOS_LEVEL_MODIFY)
+	   && qos->preemptor_list && list_count(qos->preemptor_list)) {
+		char *qos_val = NULL;
+		char *tmp_char = NULL;
+		ListIterator qos_itr = 
+			list_iterator_create(qos->preemptor_list);
+		
+		xstrcat(*cols, ", preemptors");
+		
+		while((tmp_char = list_next(qos_itr))) 
+			xstrfmtcat(qos_val, ",%s", tmp_char);
+		
+		list_iterator_destroy(qos_itr);
+		
+		xstrfmtcat(*vals, ", \"%s\"", qos_val); 		
+		xstrfmtcat(*extra, ", preemptors=\"%s\"", qos_val); 
+		xfree(qos_val);
+	} 
+	
+	if(qos->job_flags) {
+		xstrcat(*cols, ", job_flags");
+		xstrfmtcat(*vals, ", \"%s\"", qos->job_flags);
+		xstrfmtcat(*extra, ", job_flags=\"%s\"",
+			   qos->job_flags);
+	}
+
+	return SLURM_SUCCESS;
+
+}
+
+/* When selecting with these conditions, every selected column should be
+ * prefixed with t1. */
+static int _setup_association_cond_limits(acct_association_cond_t *assoc_cond,
+					  char **extra)
+{
+	int set = 0;
+	ListIterator itr = NULL;
+	char *object = NULL;
+	char *prefix = "t1";
+	if(!assoc_cond)
+		return 0;
+
+	if(assoc_cond->with_sub_accts) {
+		prefix = "t2";
+		xstrfmtcat(*extra, ", %s as t2 where "
+			   "(t1.lft between t2.lft and t2.rgt) &&",
+			   assoc_table);
+	} else 
+		xstrcat(*extra, " where");
+	
+	if(assoc_cond->with_deleted) 
+		xstrfmtcat(*extra, " (%s.deleted=0 || %s.deleted=1)",
+			prefix, prefix);
+	else 
+		xstrfmtcat(*extra, " %s.deleted=0", prefix);
+
+	if(assoc_cond->acct_list && list_count(assoc_cond->acct_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->acct_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.acct=\"%s\"", prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->cluster_list && list_count(assoc_cond->cluster_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->cluster_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.cluster=\"%s\"", prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->fairshare_list
+	   && list_count(assoc_cond->fairshare_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->fairshare_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.fairshare=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->grp_cpu_mins_list
+	   && list_count(assoc_cond->grp_cpu_mins_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->grp_cpu_mins_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.grp_cpu_mins=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->grp_cpus_list
+	   && list_count(assoc_cond->grp_cpus_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->grp_cpus_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.grp_cpus=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->grp_jobs_list
+	   && list_count(assoc_cond->grp_jobs_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->grp_jobs_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.grp_jobs=\"%s\"",
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->grp_nodes_list
+	   && list_count(assoc_cond->grp_nodes_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->grp_nodes_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.grp_nodes=\"%s\"",
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->grp_submit_jobs_list
+	   && list_count(assoc_cond->grp_submit_jobs_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->grp_submit_jobs_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.grp_submit_jobs=\"%s\"",
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->grp_wall_list
+	   && list_count(assoc_cond->grp_wall_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->grp_wall_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.grp_wall=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->max_cpu_mins_pj_list
+	   && list_count(assoc_cond->max_cpu_mins_pj_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->max_cpu_mins_pj_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.max_cpu_mins_per_job=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->max_cpus_pj_list
+	   && list_count(assoc_cond->max_cpus_pj_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->max_cpus_pj_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.max_cpus_per_job=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->max_jobs_list
+	   && list_count(assoc_cond->max_jobs_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->max_jobs_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.max_jobs=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->max_nodes_pj_list
+	   && list_count(assoc_cond->max_nodes_pj_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->max_nodes_pj_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.max_nodes_per_job=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->max_submit_jobs_list
+	   && list_count(assoc_cond->max_submit_jobs_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->max_submit_jobs_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.max_submit_jobs=\"%s\"", 
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->max_wall_pj_list
+	   && list_count(assoc_cond->max_wall_pj_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->max_wall_pj_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra,
+				   "%s.max_wall_duration_per_job=\"%s\"",
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+	
+	if(assoc_cond->user_list && list_count(assoc_cond->user_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->user_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.user=\"%s\"", prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->partition_list 
+	   && list_count(assoc_cond->partition_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->partition_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.partition=\"%s\"",
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(assoc_cond->id_list && list_count(assoc_cond->id_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->id_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.id=%s", prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+	
+	if(assoc_cond->qos_list && list_count(assoc_cond->qos_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->qos_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, 
+				   "(%s.qos like '%%,%s' "
+				   "|| %s.qos like '%%,%s,%%' "
+				   "|| %s.delta_qos like '%%,+%s' "
+				   "|| %s.delta_qos like '%%,+%s,%%')",
+				   prefix, object, prefix, object,
+				   prefix, object, prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+	
+	if(assoc_cond->parent_acct_list
+	   && list_count(assoc_cond->parent_acct_list)) {
+		set = 0;
+		xstrcat(*extra, " && (");
+		itr = list_iterator_create(assoc_cond->parent_acct_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.parent_acct=\"%s\"",
+				   prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+	return set;
+}
 /* This function will take the object given and free it later so it
  * needs to be removed from any list it is in beforehand 
  */
@@ -199,6 +977,7 @@ static int _addto_update_list(List update_list, acct_update_type_t type,
 			destroy_acct_association_rec);
 		break;
 	case ACCT_ADD_QOS:
+	case ACCT_MODIFY_QOS:
 	case ACCT_REMOVE_QOS:
 		update_object->objects = list_create(
 			destroy_acct_qos_rec);
@@ -217,7 +996,7 @@ static int _addto_update_list(List update_list, acct_update_type_t type,
  */
 static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 			 char *cluster,
-			 char *id, char *parent)
+			 char *id, char *parent, time_t now)
 {
 	int rc = SLURM_SUCCESS;
 	MYSQL_RES *result = NULL;
@@ -227,7 +1006,7 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 	int width = 0;
 	char *query = xstrdup_printf(
 		"SELECT lft from %s " 
-		"where cluster='%s' && acct='%s' && user='';",
+		"where cluster=\"%s\" && acct=\"%s\" && user='';",
 		assoc_table,
 		cluster, parent);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
@@ -258,41 +1037,43 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 	/* everything below needs to be a %d not a %u because we are
 	   looking for -1 */
 	xstrfmtcat(query,
-		   "update %s set deleted = deleted + 2, "
+		   "update %s set mod_time=%d, deleted = deleted + 2, "
 		   "lft = lft + %d, rgt = rgt + %d "
 		   "WHERE lft BETWEEN %d AND %d;",
-		   assoc_table, diff, diff, lft, rgt);
+		   assoc_table, now, diff, diff, lft, rgt);
 
 	xstrfmtcat(query,
-		   "UPDATE %s SET rgt = rgt + %d WHERE "
+		   "UPDATE %s SET mod_time=%d, rgt = rgt + %d WHERE "
 		   "rgt > %d && deleted < 2;"
-		   "UPDATE %s SET lft = lft + %d WHERE "
+		   "UPDATE %s SET mod_time=%d, lft = lft + %d WHERE "
 		   "lft > %d && deleted < 2;",
-		   assoc_table, width,
+		   assoc_table, now, width,
 		   par_left,
-		   assoc_table, width,
+		   assoc_table, now, width,
 		   par_left);
 
 	xstrfmtcat(query,
-		   "UPDATE %s SET rgt = rgt - %d WHERE "
+		   "UPDATE %s SET mod_time=%d, rgt = rgt - %d WHERE "
 		   "(%d < 0 && rgt > %d && deleted < 2) "
 		   "|| (%d > 0 && rgt > %d);"
-		   "UPDATE %s SET lft = lft - %d WHERE "
+		   "UPDATE %s SET mod_time=%d, lft = lft - %d WHERE "
 		   "(%d < 0 && lft > %d && deleted < 2) "
 		   "|| (%d > 0 && lft > %d);",
-		   assoc_table, width,
+		   assoc_table, now, width,
 		   diff, rgt,
 		   diff, lft,
-		   assoc_table, width,
+		   assoc_table, now, width,
 		   diff, rgt,
 		   diff, lft);
 
 	xstrfmtcat(query,
-		   "update %s set deleted = deleted - 2 WHERE deleted > 1;",
-		   assoc_table);
+		   "update %s set mod_time=%d, "
+		   "deleted = deleted - 2 WHERE deleted > 1;",
+		   assoc_table, now);
 	xstrfmtcat(query,
-		   "update %s set parent_acct='%s' where id = %s;",
-		   assoc_table, parent, id);
+		   "update %s set mod_time=%d, "
+		   "parent_acct=\"%s\" where id = %s;",
+		   assoc_table, now, parent, id);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
@@ -308,15 +1089,13 @@ static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt,
 static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 			uint32_t lft, uint32_t rgt,
 			char *cluster,
-			char *id, char *old_parent, char *new_parent)
+			char *id, char *old_parent, char *new_parent,
+			time_t now)
 {
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	char *query = NULL;
 	int rc = SLURM_SUCCESS;
-	List assoc_list = NULL;
-	ListIterator itr = NULL;
-	acct_association_rec_t *assoc = NULL;
 		
 	/* first we need to see if we are going to make a child of this
 	 * account the new parent.  If so we need to move that child to this
@@ -324,7 +1103,7 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 	 */
 	query = xstrdup_printf(
 		"select id, lft, rgt from %s where lft between %d and %d "
-		"&& acct='%s' && user='' order by lft;",
+		"&& acct=\"%s\" && user='' order by lft;",
 		assoc_table, lft, rgt,
 		new_parent);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
@@ -339,7 +1118,7 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 		debug4("%s(%s) %s,%s is a child of %s",
 		       new_parent, row[0], row[1], row[2], id);
 		rc = _move_account(mysql_conn, atoi(row[1]), atoi(row[2]),
-				   cluster, row[0], old_parent);
+				   cluster, row[0], old_parent, now);
 	}
 
 	mysql_free_result(result);
@@ -364,7 +1143,7 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 
 	if((row = mysql_fetch_row(result))) {
 		rc = _move_account(mysql_conn, atoi(row[0]), atoi(row[1]),
-				   cluster, id, new_parent);
+				   cluster, id, new_parent, now);
 	} else {
 		error("can't find parent? we were able to find it a second ago.");
 		rc = SLURM_ERROR;
@@ -374,26 +1153,6 @@ static int _move_parent(mysql_conn_t *mysql_conn, uid_t uid,
 	if(rc == SLURM_ERROR) 
 		return rc;
 	
-	/* now we need to send the update of the new parents and
-	 * limits, so just to be safe, send the whole tree
-	 */
-	assoc_list = acct_storage_p_get_associations(mysql_conn, uid, NULL);
-	/* NOTE: you can not use list_pop, or list_push
-	   anywhere either, since mysql is
-	   exporting something of the same type as a macro,
-	   which messes everything up (my_list.h is the bad boy).
-	   So we are just going to delete each item as it
-	   comes out since we are moving it to the update_list.
-	*/
-	itr = list_iterator_create(assoc_list);
-	while((assoc = list_next(itr))) {
-		if(_addto_update_list(mysql_conn->update_list, 
-				      ACCT_MODIFY_ASSOC,
-				      assoc) == SLURM_SUCCESS) 
-			list_remove(itr);
-	}
-	list_iterator_destroy(itr);
-	list_destroy(assoc_list);
 	return rc;
 }
 
@@ -434,6 +1193,11 @@ static int _modify_common(mysql_conn_t *mysql_conn,
 {
 	char *query = NULL;
 	int rc = SLURM_SUCCESS;
+	char *tmp_cond_char = _fix_double_quotes(cond_char);
+	char *tmp_vals = NULL;
+
+	if(vals[1])
+		tmp_vals = _fix_double_quotes(vals+2);
 
 	xstrfmtcat(query, 
 		   "update %s set mod_time=%d%s "
@@ -443,9 +1207,11 @@ static int _modify_common(mysql_conn_t *mysql_conn,
 	xstrfmtcat(query, 	
 		   "insert into %s "
 		   "(timestamp, action, name, actor, info) "
-		   "values (%d, %d, \"%s\", '%s', \"%s\");",
+		   "values (%d, %d, \"%s\", \"%s\", \"%s\");",
 		   txn_table,
-		   now, type, cond_char, user_name, vals);
+		   now, type, tmp_cond_char, user_name, tmp_vals);
+	xfree(tmp_cond_char);
+	xfree(tmp_vals);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);		
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
@@ -470,7 +1236,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			       acct_association_rec_t *assoc,
 			       char *acct,
 			       uint32_t lft, uint32_t rgt,
-			       List ret_list)
+			       List ret_list, int moved_parent)
 {
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
@@ -484,9 +1250,13 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 		"cluster",
 		"partition",
 		"max_jobs",
+		"max_submit_jobs",
 		"max_nodes_per_job",
+		"max_cpus_per_job",
 		"max_wall_duration_per_job",
-		"max_cpu_secs_per_job",
+		"max_cpu_mins_per_job",
+		"qos",
+		"delta_qos",
 		"lft",
 		"rgt"
 	};
@@ -498,9 +1268,13 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 		ASSOC_CLUSTER,
 		ASSOC_PART,
 		ASSOC_MJ,
+		ASSOC_MSJ,
 		ASSOC_MNPJ,
-		ASSOC_MWPJ,
 		ASSOC_MCPJ,
+		ASSOC_MWPJ,
+		ASSOC_MCMPJ,
+		ASSOC_QOS,
+		ASSOC_DELTA_QOS,
 		ASSOC_LFT,
 		ASSOC_RGT,
 		ASSOC_COUNT
@@ -518,8 +1292,8 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 	/* We want all the sub accounts and user accounts */
 	query = xstrdup_printf("select distinct %s from %s where deleted=0 "
 			       "&& lft between %d and %d && "
-			       "((user = '' && parent_acct = '%s') || "
-			       "(user != '' && acct = '%s')) "
+			       "((user = '' && parent_acct = \"%s\") || "
+			       "(user != '' && acct = \"%s\")) "
 			       "order by lft;",
 			       object, assoc_table, lft, rgt, acct, acct);
 	xfree(object);
@@ -536,39 +1310,90 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 		int modified = 0;
 
 		mod_assoc = xmalloc(sizeof(acct_association_rec_t));
+		init_acct_association_rec(mod_assoc);
+
 		mod_assoc->id = atoi(row[ASSOC_ID]);
 
 		if(!row[ASSOC_MJ] && assoc->max_jobs != NO_VAL) {
 			mod_assoc->max_jobs = assoc->max_jobs;
 			modified = 1;
-		} else
-			mod_assoc->max_jobs = NO_VAL;
-		
-		if(!row[ASSOC_MNPJ] &&
-		   assoc->max_nodes_per_job != NO_VAL) {
-			mod_assoc->max_nodes_per_job =
-				assoc->max_nodes_per_job;
+		}
+
+		if(!row[ASSOC_MSJ] && assoc->max_submit_jobs != NO_VAL) {
+			mod_assoc->max_submit_jobs = assoc->max_submit_jobs;
 			modified = 1;
-		} else 
-			mod_assoc->max_nodes_per_job = NO_VAL;
+		} 
 
+		if(!row[ASSOC_MNPJ] && assoc->max_nodes_pj != NO_VAL) {
+			mod_assoc->max_nodes_pj = assoc->max_nodes_pj;
+			modified = 1;
+		} 
 		
-		if(!row[ASSOC_MWPJ] && 
-		   assoc->max_wall_duration_per_job != NO_VAL) {
-			mod_assoc->max_wall_duration_per_job =
-				assoc->max_wall_duration_per_job;
+		if(!row[ASSOC_MCPJ] && assoc->max_cpus_pj != NO_VAL) {
+			mod_assoc->max_cpus_pj = assoc->max_cpus_pj;
 			modified = 1;
-		} else 
-			mod_assoc->max_wall_duration_per_job = NO_VAL;
+		} 
+		
+		if(!row[ASSOC_MWPJ] && assoc->max_wall_pj != NO_VAL) {
+			mod_assoc->max_wall_pj = assoc->max_wall_pj;
+			modified = 1;
+		}
 					
-		if(!row[ASSOC_MCPJ] && 
-		   assoc->max_cpu_secs_per_job != NO_VAL) {
-			mod_assoc->max_cpu_secs_per_job = 
-				assoc->max_cpu_secs_per_job;
+		if(!row[ASSOC_MCMPJ] && assoc->max_cpu_mins_pj != NO_VAL) {
+			mod_assoc->max_cpu_mins_pj = assoc->max_cpu_mins_pj;
 			modified = 1;
-		} else
-			mod_assoc->max_cpu_secs_per_job = NO_VAL;
-		
+		} 
+
+		if(!row[ASSOC_QOS][0] && assoc->qos_list) {
+			List delta_qos_list = NULL;
+			char *qos_char = NULL, *delta_char = NULL;
+			ListIterator delta_itr = NULL;
+			ListIterator qos_itr = 
+				list_iterator_create(assoc->qos_list);
+			if(row[ASSOC_DELTA_QOS][0]) {
+				delta_qos_list =
+					list_create(slurm_destroy_char);
+				slurm_addto_char_list(delta_qos_list,
+						      row[ASSOC_DELTA_QOS]+1);
+				delta_itr = 
+					list_iterator_create(delta_qos_list);
+			}
+
+			mod_assoc->qos_list = list_create(slurm_destroy_char);
+			/* make sure the child has not already added or
+			   removed this qos itself (via its delta_qos)
+			   before we propagate it from the parent.
+			*/
+			while((qos_char = list_next(qos_itr))) {
+				if(delta_itr && qos_char[0] != '=') {
+					while((delta_char = 
+					       list_next(delta_itr))) {
+						
+						if((qos_char[0] 
+						    != delta_char[0])
+						   && (!strcmp(qos_char+1, 
+							       delta_char+1))) 
+							break;			
+					}
+					list_iterator_reset(delta_itr);
+					if(delta_char)
+						continue;
+				}
+				list_append(mod_assoc->qos_list,
+					    xstrdup(qos_char));
+			}
+			list_iterator_destroy(qos_itr);
+			if(delta_itr)
+				list_iterator_destroy(delta_itr);
+			if(list_count(mod_assoc->qos_list) 
+			   || !list_count(assoc->qos_list))
+				modified = 1;
+			else {
+				list_destroy(mod_assoc->qos_list);
+				mod_assoc->qos_list = NULL;
+			}
+		}
+
 		/* We only want to add those that are modified here */
 		if(modified) {
 			/* Since we aren't really changing this non
@@ -583,7 +1408,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 						    row[ASSOC_ACCT],
 						    atoi(row[ASSOC_LFT]),
 						    atoi(row[ASSOC_RGT]),
-						    ret_list);
+						    ret_list, moved_parent);
 				destroy_acct_association_rec(mod_assoc);
 				continue;
 			}
@@ -604,14 +1429,19 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			}
 			
 			list_append(ret_list, object);
-			
-			if(_addto_update_list(mysql_conn->update_list, 
-					      ACCT_MODIFY_ASSOC,
-					      mod_assoc) != SLURM_SUCCESS) 
-				error("couldn't add to the update list");
-		} else {
-			xfree(mod_assoc);
-		}
+
+			if(moved_parent)
+				destroy_acct_association_rec(mod_assoc);
+			else
+				if(_addto_update_list(mysql_conn->update_list, 
+						      ACCT_MODIFY_ASSOC,
+						      mod_assoc)
+				   != SLURM_SUCCESS) 
+					error("couldn't add to "
+					      "the update list");
+		} else 
+			destroy_acct_association_rec(mod_assoc);
+		
 	}
 	mysql_free_result(result);
 
@@ -701,13 +1531,13 @@ static int _remove_common(mysql_conn_t *mysql_conn,
 	MYSQL_ROW row;
 	time_t day_old = now - DELETE_SEC_BACK;
 	bool has_jobs = false;
+	char *tmp_name_char = _fix_double_quotes(name_char);
 
 	/* If we have jobs associated with this we do not want to
 	 * really delete it for accounting purposes.  This is for
 	 * corner cases; most of the time this won't matter.
 	 */
-	if(table == acct_coord_table
-	   || table == qos_table) {
+	if(table == acct_coord_table || table == qos_table) {
 		/* This doesn't apply for these tables since we are
 		 * only looking for association type tables.
 		 */
@@ -732,9 +1562,10 @@ static int _remove_common(mysql_conn_t *mysql_conn,
 	
 	xstrfmtcat(query, 	
 		   "insert into %s (timestamp, action, name, actor) "
-		   "values (%d, %d, \"%s\", '%s');",
+		   "values (%d, %d, \"%s\", \"%s\");",
 		   txn_table,
-		   now, type, name_char, user_name);
+		   now, type, tmp_name_char, user_name);
+	xfree(tmp_name_char);
 
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
@@ -749,16 +1580,10 @@ static int _remove_common(mysql_conn_t *mysql_conn,
 	}
 	
 	if(table == qos_table) {
-		/* remove this qos from all the users/accts that have it
-		 */
-		xstrfmtcat(query,
-			   "update %s set mod_time=%d, %s "
+		/* remove this qos from all the users/accts that have it */
+		xstrfmtcat(query, "update %s set mod_time=%d %s "
 			   "where deleted=0;",
-			   user_table, now, assoc_char);
-		xstrfmtcat(query,
-			   "update %s set mod_time=%d, %s "
-			   "where deleted=0;",
-			   acct_table, now, assoc_char);
+			   assoc_table, now, assoc_char);
 		debug3("%d(%d) query\n%s",
 		       mysql_conn->conn, __LINE__, query);
 		rc = mysql_db_query(mysql_conn->db_conn, query);
@@ -768,38 +1593,12 @@ static int _remove_common(mysql_conn_t *mysql_conn,
 				mysql_db_rollback(mysql_conn->db_conn);
 			}
 			list_flush(mysql_conn->update_list);
-			
-			return SLURM_ERROR;
-		}
-		/* now get what we changed and set the update */
-		xstrfmtcat(query,
-			   "select name, qos from %s where "
-			   "mod_time=%d and deleted=0;",
-			   user_table, now);
-		if(!(result = mysql_db_query_ret(
-			     mysql_conn->db_conn, query, 0))) {
-			xfree(query);
-			if(mysql_conn->rollback) {
-				mysql_db_rollback(mysql_conn->db_conn);
-			}
-			list_flush(mysql_conn->update_list);
-			
 			return SLURM_ERROR;
 		}
-		
-		rc = 0;
-		while((row = mysql_fetch_row(result))) {
-			acct_user_rec_t *user_rec = 
-				xmalloc(sizeof(acct_user_rec_t));
-			user_rec->name = xstrdup(row[0]);
-			user_rec->qos_list = list_create(slurm_destroy_char);
-			slurm_addto_char_list(user_rec->qos_list, row[1]);
-			_addto_update_list(mysql_conn->update_list,
-					   ACCT_MODIFY_USER,
-					   user_rec);
-		}
-		mysql_free_result(result);
-		
+		/* we don't have to send anything else since removing
+		   the qos in the first place will remove it from the
+		   clusters 
+		*/		
 		return SLURM_SUCCESS;
 	} else if(table == acct_coord_table)
 		return SLURM_SUCCESS;
@@ -991,7 +1790,7 @@ just_update:
 			       "fairshare=1, max_jobs=NULL, "
 			       "max_nodes_per_job=NULL, "
 			       "max_wall_duration_per_job=NULL, "
-			       "max_cpu_secs_per_job=NULL "
+			       "max_cpu_mins_per_job=NULL "
 			       "where (%s);",
 			       assoc_table, now,
 			       loc_assoc_char);
@@ -1032,7 +1831,7 @@ static int _get_account_coords(mysql_conn_t *mysql_conn,
 		acct->coordinators = list_create(destroy_acct_coord_rec);
 			
 	query = xstrdup_printf(
-		"select user from %s where acct='%s' && deleted=0",
+		"select user from %s where acct=\"%s\" && deleted=0",
 		acct_coord_table, acct->name);
 			
 	if(!(result =
@@ -1052,8 +1851,8 @@ static int _get_account_coords(mysql_conn_t *mysql_conn,
 	query = xstrdup_printf("select distinct t0.user from %s as t0, "
 			       "%s as t1, %s as t2 where t0.acct=t1.acct && "
 			       "t1.lft<t2.lft && t1.rgt>t2.lft && "
-			       "t1.user='' && t2.acct='%s' && t1.acct!='%s' && "
-			       "!t0.deleted;",
+			       "t1.user='' && t2.acct=\"%s\" "
+			       "&& t1.acct!=\"%s\" && !t0.deleted;",
 			       acct_coord_table, assoc_table, assoc_table,
 			       acct->name, acct->name);
 	if(!(result =
@@ -1091,7 +1890,7 @@ static int _get_user_coords(mysql_conn_t *mysql_conn, acct_user_rec_t *user)
 		user->coord_accts = list_create(destroy_acct_coord_rec);
 			
 	query = xstrdup_printf(
-		"select acct from %s where user='%s' && deleted=0",
+		"select acct from %s where user=\"%s\" && deleted=0",
 		acct_coord_table, user->name);
 			
 	if(!(result =
@@ -1110,16 +1909,16 @@ static int _get_user_coords(mysql_conn_t *mysql_conn, acct_user_rec_t *user)
 		else 
 			query = xstrdup_printf(
 				"select distinct t1.acct from "
-				"%s as t1, %s as t2 where ",
+				"%s as t1, %s as t2 where t1.deleted=0 && ",
 				assoc_table, assoc_table);
 		/* Make sure we don't get the same
 		 * account back since we want to keep
 		 * track of the sub-accounts.
 		 */
-		xstrfmtcat(query, "(t2.acct='%s' "
+		xstrfmtcat(query, "(t2.acct=\"%s\" "
 			   "&& t1.lft between t2.lft "
 			   "and t2.rgt && t1.user='' "
-			   "&& t1.acct!='%s')",
+			   "&& t1.acct!=\"%s\")",
 			   coord->name, coord->name);
 	}
 	mysql_free_result(result);
@@ -1219,7 +2018,6 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "name", "tinytext not null" },
 		{ "description", "text not null" },
 		{ "organization", "text not null" },
-		{ "qos", "blob not null default ''" },
 		{ NULL, NULL}		
 	};
 
@@ -1237,9 +2035,19 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "rgt", "int not null" },
 		{ "fairshare", "int default 1 not null" },
 		{ "max_jobs", "int default NULL" },
+		{ "max_submit_jobs", "int default NULL" },
+		{ "max_cpus_per_job", "int default NULL" },
 		{ "max_nodes_per_job", "int default NULL" },
 		{ "max_wall_duration_per_job", "int default NULL" },
-		{ "max_cpu_secs_per_job", "int default NULL" },
+		{ "max_cpu_mins_per_job", "bigint default NULL" },
+		{ "grp_jobs", "int default NULL" },
+		{ "grp_submit_jobs", "int default NULL" },
+		{ "grp_cpus", "int default NULL" },
+		{ "grp_nodes", "int default NULL" },
+		{ "grp_wall", "int default NULL" },
+		{ "grp_cpu_mins", "bigint default NULL" },
+		{ "qos", "blob not null default ''" },
+		{ "delta_qos", "blob not null default ''" },
 		{ NULL, NULL}		
 	};
 
@@ -1260,6 +2068,8 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "name", "tinytext not null" },
 		{ "control_host", "tinytext not null default ''" },
 		{ "control_port", "mediumint not null default 0" },
+		{ "rpc_version", "mediumint not null default 0" },
+		{ "valid_qos", "blob" },
 		{ NULL, NULL}		
 	};
 
@@ -1294,6 +2104,7 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "associd", "mediumint unsigned not null" },
 		{ "uid", "smallint unsigned not null" },
 		{ "gid", "smallint unsigned not null" },
+		{ "cluster", "tinytext" },
 		{ "partition", "tinytext not null" },
 		{ "blockid", "tinytext" },
 		{ "account", "tinytext" },
@@ -1329,6 +2140,22 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "id", "int not null auto_increment" },
 		{ "name", "tinytext not null" }, 
 		{ "description", "text" }, 
+		{ "max_jobs_per_user", "int default NULL" },
+		{ "max_submit_jobs_per_user", "int default NULL" },
+		{ "max_cpus_per_user", "int default NULL" },
+		{ "max_nodes_per_user", "int default NULL" },
+		{ "max_wall_duration_per_user", "int default NULL" },
+		{ "max_cpu_mins_per_user", "bigint default NULL" },
+		{ "grp_jobs", "int default NULL" },
+		{ "grp_submit_jobs", "int default NULL" },
+		{ "grp_cpus", "int default NULL" },
+		{ "grp_nodes", "int default NULL" },
+		{ "grp_wall", "int default NULL" },
+		{ "grp_cpu_mins", "bigint default NULL" },
+		{ "job_flags", "text" },
+		{ "preemptees", "text not null default ''" },
+		{ "preemptors", "text not null default ''" },
+		{ "priority", "int default 0" },
 		{ NULL, NULL}		
 	};
 
@@ -1379,9 +2206,9 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "id", "int not null auto_increment" },
 		{ "timestamp", "int unsigned default 0 not null" },
 		{ "action", "smallint not null" },
-		{ "name", "tinytext not null" },
+		{ "name", "text not null" },
 		{ "actor", "tinytext not null" },
-		{ "info", "text" },
+		{ "info", "blob" },
 		{ NULL, NULL}		
 	};
 
@@ -1391,7 +2218,6 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "deleted", "tinyint default 0" },
 		{ "name", "tinytext not null" },
 		{ "default_acct", "tinytext not null" },
-		{ "qos", "blob not null default ''" },
 		{ "admin_level", "smallint default 1 not null" },
 		{ NULL, NULL}		
 	};
@@ -1403,15 +2229,23 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		"begin "
 		"set @par_id = NULL; "
 		"set @mj = NULL; "
+		"set @msj = NULL; "
+		"set @mcpj = NULL; "
 		"set @mnpj = NULL; "
 		"set @mwpj = NULL; "
-		"set @mcpj = NULL; "
+		"set @mcmpj = NULL; "
+		"set @qos = ''; "
+		"set @delta_qos = ''; "
 		"set @my_acct = acct; "
 		"if without_limits then "
 		"set @mj = 0; " 
+		"set @msj = 0; " 
+		"set @mcpj = 0; "
 		"set @mnpj = 0; "
 		"set @mwpj = 0; "
-		"set @mcpj = 0; "
+		"set @mcmpj = 0; "
+		"set @qos = 0; "
+		"set @delta_qos = 0; "
 		"end if; "
 		"REPEAT "
 		"set @s = 'select '; "
@@ -1421,14 +2255,24 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		"if @mj is NULL then set @s = CONCAT("
 		"@s, '@mj := max_jobs, '); "
 		"end if; "
+		"if @msj is NULL then set @s = CONCAT("
+		"@s, '@msj := max_submit_jobs, '); "
+		"end if; "
+		"if @mcpj is NULL then set @s = CONCAT("
+		"@s, '@mcpj := max_cpus_per_job, ') ;"
+		"end if; "
 		"if @mnpj is NULL then set @s = CONCAT("
 		"@s, '@mnpj := max_nodes_per_job, ') ;"
 		"end if; "
 		"if @mwpj is NULL then set @s = CONCAT("
 		"@s, '@mwpj := max_wall_duration_per_job, '); "
 		"end if; "
-		"if @mcpj is NULL then set @s = CONCAT("
-		"@s, '@mcpj := max_cpu_secs_per_job, '); "
+		"if @mcmpj is NULL then set @s = CONCAT("
+		"@s, '@mcmpj := max_cpu_mins_per_job, '); "
+		"end if; "
+		"if @qos = '' then set @s = CONCAT("
+		"@s, '@qos := qos, "
+		"@delta_qos := CONCAT(@delta_qos, delta_qos), '); "
 		"end if; "
 		"set @s = concat(@s, ' @my_acct := parent_acct from ', "
 		"my_table, ' where acct = \"', @my_acct, '\" && "
@@ -1436,8 +2280,9 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		"prepare query from @s; "
 		"execute query; "
 		"deallocate prepare query; "
-		"UNTIL (@mj != -1 && @mnpj != -1 && @mwpj != -1 "
-		"&& @mcpj != -1) || @my_acct = '' END REPEAT; "
+		"UNTIL (@mj != -1 && @msj != -1 && @mcpj != -1 "
+		"&& @mnpj != -1 && @mwpj != -1 "
+		"&& @mcmpj != -1 && @qos != '') || @my_acct = '' END REPEAT; "
 		"END;";
 	char *query = NULL;
 	time_t now = time(NULL);
@@ -1473,9 +2318,7 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 	if(mysql_db_create_table(db_conn, assoc_table, assoc_table_fields,
 				 ", primary key (id), "
 				 " unique index (user(20), acct(20), "
-				 "cluster(20), partition(20)))"
-/* 				 " unique index (lft), " */
-				 /* 				 " unique index (rgt))" */)
+				 "cluster(20), partition(20)))")
 	   == SLURM_ERROR)
 		return SLURM_ERROR;
 
@@ -1533,7 +2376,7 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 			"on duplicate key update id=LAST_INSERT_ID(id), "
 			"deleted=0;",
 			qos_table, now, now);
-		debug3("%s", query);
+		//debug3("%s", query);
 		normal_qos_id = mysql_insert_ret_id(db_conn, query);
 		xfree(query);		
 	}
@@ -1575,7 +2418,7 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		   "update name='root';",
 		   acct_table, now); 
 
-	debug3("%s", query);
+	//debug3("%s", query);
 	mysql_db_query(db_conn, query);
 	xfree(query);		
 
@@ -1661,11 +2504,12 @@ extern int fini ( void )
 #endif
 }
 
-extern void *acct_storage_p_get_connection(bool make_agent, bool rollback)
+extern void *acct_storage_p_get_connection(bool make_agent, int conn_num,
+					   bool rollback)
 {
 #ifdef HAVE_MYSQL
 	mysql_conn_t *mysql_conn = xmalloc(sizeof(mysql_conn_t));
-	static int conn = 0;
+	
 	if(!mysql_db_info)
 		init();
 
@@ -1677,7 +2521,7 @@ extern void *acct_storage_p_get_connection(bool make_agent, bool rollback)
 	if(rollback) {
 		mysql_autocommit(mysql_conn->db_conn, 0);
 	}
-	mysql_conn->conn = conn++;
+	mysql_conn->conn = conn_num;
 	mysql_conn->update_list = list_create(destroy_acct_update_object);
 	return (void *)mysql_conn;
 #else
@@ -1732,14 +2576,12 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 		ListIterator itr = NULL;
 		acct_update_object_t *object = NULL;
 		
-		slurm_msg_t_init(&req);
-		slurm_msg_t_init(&resp);
-		
 		memset(&msg, 0, sizeof(accounting_update_msg_t));
 		msg.update_list = mysql_conn->update_list;
 		
-		xstrfmtcat(query, "select control_host, control_port from %s "
-			   "where deleted=0 && control_port != 0",
+		xstrfmtcat(query, "select control_host, control_port, "
+			   "name, rpc_version "
+			   "from %s where deleted=0 && control_port != 0",
 			   cluster_table);
 		if(!(result = mysql_db_query_ret(
 			     mysql_conn->db_conn, query, 0))) {
@@ -1748,16 +2590,20 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 		}
 		xfree(query);
 		while((row = mysql_fetch_row(result))) {
-			info("sending to %s(%s)", row[0], row[1]);
+			debug("sending updates to %s at %s(%s) ver %s",
+			      row[2], row[0], row[1], row[3]);
+			msg.rpc_version = atoi(row[3]);
+			slurm_msg_t_init(&req);
 			slurm_set_addr_char(&req.address, atoi(row[1]), row[0]);
 			req.msg_type = ACCOUNTING_UPDATE_MSG;
 			req.flags = SLURM_GLOBAL_AUTH_KEY;
 			req.data = &msg;			
+			slurm_msg_t_init(&resp);
 			
 			rc = slurm_send_recv_node_msg(&req, &resp, 0);
 			if ((rc != 0) || !resp.auth_cred) {
-				error("update cluster: %m to %s(%s)",
-				      row[0], row[1]);
+				error("update cluster: %m to %s at %s(%s)",
+				      row[2], row[0], row[1]);
 				if (resp.auth_cred)
 					g_slurm_auth_destroy(resp.auth_cred);
 				rc = SLURM_ERROR;
@@ -1805,6 +2651,7 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 				rc = assoc_mgr_update_local_assocs(object);
 				break;
 			case ACCT_ADD_QOS:
+			case ACCT_MODIFY_QOS:
 			case ACCT_REMOVE_QOS:
 				rc = assoc_mgr_update_local_qos(object);
 				break;
@@ -1856,26 +2703,9 @@ extern int acct_storage_p_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrcat(cols, "creation_time, mod_time, name, default_acct");
 		xstrfmtcat(vals, "%d, %d, '%s', '%s'", 
 			   now, now, object->name, object->default_acct); 
-		xstrfmtcat(extra, ", default_acct='%s'", object->default_acct);
-		if(object->qos_list && list_count(object->qos_list)) {
-			char *qos_val = NULL;
-			char *tmp_char = NULL;
-			ListIterator qos_itr =
-				list_iterator_create(object->qos_list);
-			xstrcat(cols, ", qos");
-			while((tmp_char = list_next(qos_itr))) {
-				xstrfmtcat(qos_val, ",%s", tmp_char);
-			}
-
-			xstrfmtcat(vals, ", '%s'", qos_val); 		
-			xstrfmtcat(extra, ", qos='%s'", qos_val); 		
-		} else if(normal_qos_id != NO_VAL) { 
-			/* Add normal qos to the user */
-			xstrcat(cols, ", qos");
-			xstrfmtcat(vals, ", ',%d'", normal_qos_id);
-			xstrfmtcat(extra, ", qos=',%d'", normal_qos_id);
-		}
-
+		xstrfmtcat(extra, ", default_acct='%s'",
+			   object->default_acct);
+		
 		if(object->admin_level != ACCT_ADMIN_NOTSET) {
 			xstrcat(cols, ", admin_level");
 			xstrfmtcat(vals, ", %u", object->admin_level);
@@ -1913,14 +2743,14 @@ extern int acct_storage_p_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		if(txn_query)
 			xstrfmtcat(txn_query, 	
-				   ", (%d, %u, '%s', '%s', \"%s\")",
+				   ", (%d, %u, \"%s\", \"%s\", \"%s\")",
 				   now, DBD_ADD_USERS, object->name,
 				   user_name, extra);
 		else
 			xstrfmtcat(txn_query, 	
 				   "insert into %s "
 				   "(timestamp, action, name, actor, info) "
-				   "values (%d, %u, '%s', '%s', \"%s\")",
+				   "values (%d, %u, \"%s\", \"%s\", \"%s\")",
 				   txn_table,
 				   now, DBD_ADD_USERS, object->name,
 				   user_name, extra);
@@ -1991,19 +2821,19 @@ extern int acct_storage_p_add_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 	while((user = list_next(itr))) {
 		while((acct = list_next(itr2))) {
 			if(query) 
-				xstrfmtcat(query, ", (%d, %d, '%s', '%s')",
+				xstrfmtcat(query, ", (%d, %d, \"%s\", \"%s\")",
 					   now, now, acct, user);
 			else
 				query = xstrdup_printf(
 					"insert into %s (creation_time, "
 					"mod_time, acct, user) values "
-					"(%d, %d, '%s', '%s')",
+					"(%d, %d, \"%s\", \"%s\")",
 					acct_coord_table, 
 					now, now, acct, user); 
 
 			if(txn_query)
 				xstrfmtcat(txn_query, 	
-					   ", (%d, %u, '%s', '%s', '%s')",
+					   ", (%d, %u, \"%s\", \"%s\", \"%s\")",
 					   now, DBD_ADD_ACCOUNT_COORDS, user,
 					   user_name, acct);
 			else
@@ -2011,7 +2841,8 @@ extern int acct_storage_p_add_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 					   "insert into %s "
 					   "(timestamp, action, name, "
 					   "actor, info) "
-					   "values (%d, %u, '%s', '%s', '%s')",
+					   "values (%d, %u, \"%s\", "
+					   "\"%s\", \"%s\")",
 					   txn_table,
 					   now, DBD_ADD_ACCOUNT_COORDS, user,
 					   user_name, acct);
@@ -2064,7 +2895,8 @@ extern int acct_storage_p_add_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	char *cols = NULL, *vals = NULL, *query = NULL, *txn_query = NULL;
 	time_t now = time(NULL);
 	char *user_name = NULL;
-	char *extra = NULL;
+	char *extra = NULL, *tmp_extra = NULL;
+	
 	int affect_rows = 0;
 	List assoc_list = list_create(destroy_acct_association_rec);
 
@@ -2085,31 +2917,12 @@ extern int acct_storage_p_add_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		}
 		xstrcat(cols, "creation_time, mod_time, name, "
 			"description, organization");
-		xstrfmtcat(vals, "%d, %d, '%s', '%s', '%s'", 
+		xstrfmtcat(vals, "%d, %d, \"%s\", \"%s\", \"%s\"", 
 			   now, now, object->name, 
 			   object->description, object->organization); 
-		xstrfmtcat(extra, ", description='%s', organization='%s'",
+		xstrfmtcat(extra, ", description=\"%s\", organization=\"%s\"",
 			   object->description, object->organization); 		
 		
-		if(object->qos_list && list_count(object->qos_list)) {
-			char *qos_val = NULL;
-			char *tmp_char = NULL;
-			ListIterator qos_itr =
-				list_iterator_create(object->qos_list);
-			xstrcat(cols, ", qos");
-			while((tmp_char = list_next(qos_itr))) {
-				xstrfmtcat(qos_val, ",%s", tmp_char);
-			}
-
-			xstrfmtcat(vals, ", '%s'", qos_val); 		
-			xstrfmtcat(extra, ", qos='%s'", qos_val); 		
-		} else if(normal_qos_id != NO_VAL) { 
-			/* Add normal qos to the account */
-			xstrcat(cols, ", qos");
-			xstrfmtcat(vals, ", ',%d'", normal_qos_id);
-			xstrfmtcat(extra, ", qos=',%d'", normal_qos_id);
-		}
-
 		query = xstrdup_printf(
 			"insert into %s (%s) values (%s) "
 			"on duplicate key update deleted=0, mod_time=%d %s;",
@@ -2135,19 +2948,23 @@ extern int acct_storage_p_add_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 			continue;
 		}
 
+		/* we always have a ', ' as the first 2 chars */
+		tmp_extra = _fix_double_quotes(extra+2);
+
 		if(txn_query)
 			xstrfmtcat(txn_query, 	
-				   ", (%d, %u, '%s', '%s', \"%s\")",
+				   ", (%d, %u, \"%s\", \"%s\", \"%s\")",
 				   now, DBD_ADD_ACCOUNTS, object->name,
-				   user_name, extra);
+				   user_name, tmp_extra);
 		else
 			xstrfmtcat(txn_query, 	
 				   "insert into %s "
 				   "(timestamp, action, name, actor, info) "
-				   "values (%d, %u, '%s', '%s', \"%s\")",
+				   "values (%d, %u, \"%s\", \"%s\", \"%s\")",
 				   txn_table,
 				   now, DBD_ADD_ACCOUNTS, object->name,
-				   user_name, extra);
+				   user_name, tmp_extra);
+		xfree(tmp_extra);
 		xfree(extra);
 		
 		if(!object->assoc_list)
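The txn-table statements above now wrap the info column in double quotes, so the `extra` string (which itself contains double-quoted SQL literals after this change) has to be escaped before being embedded; hence the `_fix_double_quotes(extra+2)` call, which skips the leading ", ". The helper's implementation is not part of this hunk, so the following is only a minimal stand-alone sketch of the behavior its call sites imply (backslash-escape every embedded double quote):

/* Hypothetical sketch of a _fix_double_quotes()-style helper.  The real
 * implementation is not shown in this hunk, so the behavior here (escape
 * every '"' with a backslash so the string can sit inside a double-quoted
 * SQL literal) is an assumption based on how the result is used above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *fix_double_quotes(const char *str)
{
	size_t i, j, len = strlen(str);
	char *fixed = malloc(2 * len + 1);	/* worst case: all quotes */

	if (!fixed)
		return NULL;
	for (i = 0, j = 0; i < len; i++) {
		if (str[i] == '"')
			fixed[j++] = '\\';
		fixed[j++] = str[i];
	}
	fixed[j] = '\0';
	return fixed;
}

int main(void)
{
	/* mirrors the call site: skip the leading ", " before escaping */
	const char *extra = ", description=\"test acct\", organization=\"lc\"";
	char *tmp_extra = fix_double_quotes(extra + 2);

	printf("... values (..., \"%s\");\n", tmp_extra);
	free(tmp_extra);
	return 0;
}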
@@ -2217,74 +3034,18 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 		}
 
 		xstrcat(cols, "creation_time, mod_time, acct, cluster");
-		xstrfmtcat(vals, "%d, %d, 'root', '%s'",
+		xstrfmtcat(vals, "%d, %d, 'root', \"%s\"",
 			   now, now, object->name);
 		xstrfmtcat(extra, ", mod_time=%d", now);
-	
-		if((int)object->default_fairshare >= 0) {
-			xstrcat(cols, ", fairshare");
-			xstrfmtcat(vals, ", %u", object->default_fairshare);
-			xstrfmtcat(extra, ", fairshare=%u",
-				   object->default_fairshare);
-		} else if ((int)object->default_fairshare == INFINITE) {
-			xstrcat(cols, ", fairshare");
-			xstrfmtcat(vals, ", NULL");
-			xstrfmtcat(extra, ", fairshare=NULL");		
-		}
-
-		if((int)object->default_max_cpu_secs_per_job >= 0) {
-			xstrcat(cols, ", max_cpu_secs_per_job");
-			xstrfmtcat(vals, ", %u",
-				   object->default_max_cpu_secs_per_job);
-			xstrfmtcat(extra, ", max_cpu_secs_per_job=%u",
-				   object->default_max_cpu_secs_per_job);
-		} else if((int)object->default_max_cpu_secs_per_job 
-			  == INFINITE) {
-			xstrcat(cols, ", max_cpu_secs_per_job");
-			xstrfmtcat(vals, ", NULL");
-			xstrfmtcat(extra, ", max_cpu_secs_per_job=NULL");
-		}
-		
-		if((int)object->default_max_jobs >= 0) {
-			xstrcat(cols, ", max_jobs");
-			xstrfmtcat(vals, ", %u", object->default_max_jobs);
-			xstrfmtcat(extra, ", max_jobs=%u",
-				   object->default_max_jobs);
-		} else if((int)object->default_max_jobs == INFINITE) {
-			xstrcat(cols, ", max_jobs");
-			xstrfmtcat(vals, ", NULL");
-			xstrfmtcat(extra, ", max_jobs=NULL");		
-		}
-
-		if((int)object->default_max_nodes_per_job >= 0) {
-			xstrcat(cols, ", max_nodes_per_job");
-			xstrfmtcat(vals, ", %u", 
-				   object->default_max_nodes_per_job);
-			xstrfmtcat(extra, ", max_nodes_per_job=%u",
-				   object->default_max_nodes_per_job);
-		} else if((int)object->default_max_nodes_per_job == INFINITE) {
-			xstrcat(cols, ", max_nodes_per_job");
-			xstrfmtcat(vals, ", NULL");
-			xstrfmtcat(extra, ", max_nodes_per_job=NULL");
-		}
-
-		if((int)object->default_max_wall_duration_per_job >= 0) {
-			xstrcat(cols, ", max_wall_duration_per_job");
-			xstrfmtcat(vals, ", %u",
-				   object->default_max_wall_duration_per_job);
-			xstrfmtcat(extra, ", max_wall_duration_per_job=%u",
-				   object->default_max_wall_duration_per_job);
-		} else if((int)object->default_max_wall_duration_per_job
-			  == INFINITE) {
-			xstrcat(cols, ", max_wall_duration_per_job");
-			xstrfmtcat(vals, ", NULL");
-			xstrfmtcat(extra, ", max_wall_duration_per_job=NULL");
-		}
-
+		if(object->root_assoc)
+			_setup_association_limits(object->root_assoc, &cols, 
+						  &vals, &extra,
+						  QOS_LEVEL_SET, 1);
 		xstrfmtcat(query, 
 			   "insert into %s (creation_time, mod_time, name) "
-			   "values (%d, %d, '%s') "
-			   "on duplicate key update deleted=0, mod_time=%d;",
+			   "values (%d, %d, \"%s\") "
+			   "on duplicate key update deleted=0, mod_time=%d, "
+			   "control_host='', control_port=0;",
 			   cluster_table, 
 			   now, now, object->name,
 			   now);
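The long per-limit if/else chains deleted above are folded into a single `_setup_association_limits()` call. That helper is defined elsewhere in this patch, so the sketch below only restates the convention the removed code followed (NO_VAL means "not set" and is skipped, INFINITE becomes SQL NULL, anything else is written literally), using plain snprintf instead of the xstr*() string builders:

/* Stand-alone restatement of the limit-handling convention shown in the
 * deleted lines above; _setup_association_limits() itself is defined
 * elsewhere in this patch, so column names and the snprintf-based string
 * building here are illustrative only. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NO_VAL   0xfffffffe
#define INFINITE 0xffffffff

static void append_limit(const char *name, uint32_t value,
			 char *cols, char *vals, char *extra, size_t len)
{
	if (value == NO_VAL)		/* not set: leave the column alone */
		return;
	snprintf(cols + strlen(cols), len - strlen(cols), ", %s", name);
	if (value == INFINITE) {	/* INFINITE maps to SQL NULL */
		snprintf(vals + strlen(vals), len - strlen(vals), ", NULL");
		snprintf(extra + strlen(extra), len - strlen(extra),
			 ", %s=NULL", name);
	} else {
		snprintf(vals + strlen(vals), len - strlen(vals),
			 ", %u", value);
		snprintf(extra + strlen(extra), len - strlen(extra),
			 ", %s=%u", name, value);
	}
}

int main(void)
{
	char cols[256]  = "creation_time, mod_time, acct, cluster";
	char vals[256]  = "123, 123, 'root', \"tux\"";
	char extra[256] = ", mod_time=123";

	append_limit("max_jobs", 50, cols, vals, extra, sizeof(cols));
	append_limit("fairshare", INFINITE, cols, vals, extra, sizeof(cols));
	append_limit("max_nodes_per_job", NO_VAL, cols, vals, extra,
		     sizeof(cols));

	printf("insert into assoc_table (%s) values (%s)\n"
	       "on duplicate key update deleted=0%s;\n", cols, vals, extra);
	return 0;
}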
@@ -2341,7 +3102,7 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrfmtcat(query,
 			   "insert into %s "
 			   "(timestamp, action, name, actor, info) "
-			   "values (%d, %u, '%s', '%s', \"%s\");",
+			   "values (%d, %u, \"%s\", \"%s\", \"%s\");",
 			   txn_table, now, DBD_ADD_CLUSTERS, 
 			   object->name, user_name, extra);
 		xfree(extra);			
@@ -2355,19 +3116,15 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		/* Add user root by default to run from the root
 		 * association.  This gets popped off so we need to
-		 * readd it every time here. 
+		 * re-add it every time here.
 		 */
 		assoc = xmalloc(sizeof(acct_association_rec_t));
+		init_acct_association_rec(assoc);
 		list_append(assoc_list, assoc);
 		
 		assoc->cluster = xstrdup(object->name);
 		assoc->user = xstrdup("root");
 		assoc->acct = xstrdup("root");
-		assoc->fairshare = NO_VAL;
-		assoc->max_cpu_secs_per_job = NO_VAL;
-		assoc->max_jobs = NO_VAL;
-		assoc->max_nodes_per_job = NO_VAL;
-		assoc->max_wall_duration_per_job = NO_VAL;
 
 		if(acct_storage_p_add_associations(mysql_conn, uid, assoc_list)
 		   == SLURM_ERROR) {
@@ -2404,7 +3161,7 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 	int i=0;
 	acct_association_rec_t *object = NULL;
 	char *cols = NULL, *vals = NULL, *txn_query = NULL,
-		*extra = NULL, *query = NULL, *update = NULL;
+		*extra = NULL, *query = NULL, *update = NULL, *tmp_extra = NULL;
 	char *parent = NULL;
 	time_t now = time(NULL);
 	char *user_name = NULL;
@@ -2412,6 +3169,7 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 	int assoc_id = 0;
 	int incr = 0, my_left = 0;
 	int affect_rows = 0;
+	int moved_parent = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	char *old_parent = NULL, *old_cluster = NULL;
@@ -2459,22 +3217,23 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 		}
 
 		xstrcat(cols, "creation_time, mod_time, cluster, acct");
-		xstrfmtcat(vals, "%d, %d, '%s', '%s'", 
+		xstrfmtcat(vals, "%d, %d, \"%s\", \"%s\"", 
 			   now, now, object->cluster, object->acct); 
-		xstrfmtcat(update, "where id>=0 && cluster='%s' && acct='%s'",
+		xstrfmtcat(update, 
+			   "where cluster=\"%s\" && acct=\"%s\"",
 			   object->cluster, object->acct); 
 
 		xstrfmtcat(extra, ", mod_time=%d", now);
 		if(!object->user) {
 			xstrcat(cols, ", parent_acct");
-			xstrfmtcat(vals, ", '%s'", parent);
-			xstrfmtcat(extra, ", parent_acct='%s'", parent);
+			xstrfmtcat(vals, ", \"%s\"", parent);
+			xstrfmtcat(extra, ", parent_acct=\"%s\"", parent);
 			xstrfmtcat(update, " && user=''"); 
 		} else {
 			char *part = object->partition;
 			xstrcat(cols, ", user");
-			xstrfmtcat(vals, ", '%s'", object->user); 		
-			xstrfmtcat(update, " && user='%s'",
+			xstrfmtcat(vals, ", \"%s\"", object->user); 		
+			xstrfmtcat(update, " && user=\"%s\"",
 				   object->user); 
 
 			/* We need to give a partition wiether it be
@@ -2483,45 +3242,12 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 			if(!part)
 				part = "";
 			xstrcat(cols, ", partition");
-			xstrfmtcat(vals, ", '%s'", part);
-			xstrfmtcat(update, " && partition='%s'", part);
-		}
-
-		if((int)object->fairshare >= 0) {
-			xstrcat(cols, ", fairshare");
-			xstrfmtcat(vals, ", %d", object->fairshare);
-			xstrfmtcat(extra, ", fairshare=%d",
-				   object->fairshare);
-		}
-
-		if((int)object->max_jobs >= 0) {
-			xstrcat(cols, ", max_jobs");
-			xstrfmtcat(vals, ", %d", object->max_jobs);
-			xstrfmtcat(extra, ", max_jobs=%d",
-				   object->max_jobs);
+			xstrfmtcat(vals, ", \"%s\"", part);
+			xstrfmtcat(update, " && partition=\"%s\"", part);
 		}
 
-		if((int)object->max_nodes_per_job >= 0) {
-			xstrcat(cols, ", max_nodes_per_job");
-			xstrfmtcat(vals, ", %d", object->max_nodes_per_job);
-			xstrfmtcat(extra, ", max_nodes_per_job=%d",
-				   object->max_nodes_per_job);
-		}
-
-		if((int)object->max_wall_duration_per_job >= 0) {
-			xstrcat(cols, ", max_wall_duration_per_job");
-			xstrfmtcat(vals, ", %d",
-				   object->max_wall_duration_per_job);
-			xstrfmtcat(extra, ", max_wall_duration_per_job=%d",
-				   object->max_wall_duration_per_job);
-		}
-
-		if((int)object->max_cpu_secs_per_job >= 0) {
-			xstrcat(cols, ", max_cpu_secs_per_job");
-			xstrfmtcat(vals, ", %d", object->max_cpu_secs_per_job);
-			xstrfmtcat(extra, ", max_cpu_secs_per_job=%d",
-				   object->max_cpu_secs_per_job);
-		}
+		_setup_association_limits(object, &cols, &vals, &extra, 
+					  QOS_LEVEL_NONE, 1);
 
 		for(i=0; i<MASSOC_COUNT; i++) {
 			if(i) 
@@ -2566,7 +3292,7 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 			   || strcasecmp(object->cluster, old_cluster)) {
 				char *sel_query = xstrdup_printf(
 					"SELECT lft FROM %s WHERE "
-					"acct = '%s' and cluster = '%s' "
+					"acct = \"%s\" and cluster = \"%s\" "
 					"and user = '' order by lft;",
 					assoc_table,
 					parent, object->cluster);
@@ -2586,8 +3312,9 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 						assoc_table, incr,
 						my_left,
 						assoc_table);
-					debug3("%d query\n%s", mysql_conn->conn,
-					       up_query);
+					debug3("%d(%d) query\n%s", 
+					       mysql_conn->conn, 
+					       __LINE__, up_query);
 					rc = mysql_db_query(
 						mysql_conn->db_conn,
 						up_query);
@@ -2603,8 +3330,8 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 					}
 				}
 
-				debug3("%d query\n%s", mysql_conn->conn,
-				       sel_query);
+				debug3("%d(%d) query\n%s", mysql_conn->conn,
+				       __LINE__, sel_query);
 				if(!(sel_result = mysql_db_query_ret(
 					     mysql_conn->db_conn,
 					     sel_query, 0))) {
@@ -2650,8 +3377,8 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 			/* definantly works but slow */
 /* 			xstrfmtcat(query, */
 /* 				   "SELECT @myLeft := lft FROM %s WHERE " */
-/* 				   "acct = '%s' " */
-/* 				   "and cluster = '%s' and user = '';", */
+/* 				   "acct = \"%s\" " */
+/* 				   "and cluster = \"%s\" and user = '';", */
 /* 				   assoc_table, */
 /* 				   parent, */
 /* 				   object->cluster); */
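Both the live code and the commented-out "works but slow" variant above are doing standard nested-set maintenance on the association table: find the parent's lft, shift every lft/rgt beyond it by two to open a gap, and give the new row the two freed positions. A self-contained illustration of that shift, independent of MySQL and of this plugin's helpers, might look like:

/* Nested-set gap opening, done here with an in-memory array instead of the
 * SQL updates the plugin issues.  Example data is hypothetical. */
#include <stdio.h>

struct node { const char *name; int lft; int rgt; };

int main(void)
{
	struct node tree[] = {
		{ "root",    1, 6 },
		{ "physics", 2, 3 },
		{ "chem",    4, 5 },
	};
	int n = sizeof(tree) / sizeof(tree[0]);
	int my_left = 2;	/* lft of the parent acct "physics" */
	int i;

	/* open a gap of two positions after the parent's lft */
	for (i = 0; i < n; i++) {
		if (tree[i].rgt > my_left)
			tree[i].rgt += 2;
		if (tree[i].lft > my_left)
			tree[i].lft += 2;
	}

	/* the new association slots into the gap as the parent's child */
	printf("new assoc: lft=%d rgt=%d\n", my_left + 1, my_left + 2);
	for (i = 0; i < n; i++)
		printf("%-8s lft=%d rgt=%d\n",
		       tree[i].name, tree[i].lft, tree[i].rgt);
	return 0;
}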
@@ -2694,9 +3421,10 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 						object->cluster,
 						row[MASSOC_ID],
 						row[MASSOC_PACCT],
-						object->parent_acct)
+						object->parent_acct, now)
 				   == SLURM_ERROR)
 					continue;
+				moved_parent = 1;
 			}
 
 
@@ -2732,25 +3460,29 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 		}
 
 		object->id = assoc_id;
-
+		
 		if(_addto_update_list(mysql_conn->update_list, ACCT_ADD_ASSOC,
 				      object) == SLURM_SUCCESS) {
 			list_remove(itr);
 		}
 
+		/* we always have a ', ' as the first 2 chars */
+		tmp_extra = _fix_double_quotes(extra+2);
+
 		if(txn_query)
 			xstrfmtcat(txn_query, 	
-				   ", (%d, %d, '%d', '%s', \"%s\")",
+				   ", (%d, %d, '%d', \"%s\", \"%s\")",
 				   now, DBD_ADD_ASSOCS, assoc_id, user_name,
-				   extra);
+				   tmp_extra);
 		else
 			xstrfmtcat(txn_query, 	
 				   "insert into %s "
 				   "(timestamp, action, name, actor, info) "
-				   "values (%d, %d, '%d', '%s', \"%s\")",
+				   "values (%d, %d, '%d', \"%s\", \"%s\")",
 				   txn_table,
 				   now, DBD_ADD_ASSOCS, assoc_id, user_name, 
-				   extra);
+				   tmp_extra);
+		xfree(tmp_extra);
 		xfree(extra);
 	}
 	list_iterator_destroy(itr);
@@ -2783,6 +3515,10 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 	}
 
 end_it:
+
+	xfree(old_parent);
+	xfree(old_cluster);
+
 	if(rc != SLURM_ERROR) {
 		if(txn_query) {
 			xstrcat(txn_query, ";");
@@ -2794,6 +3530,45 @@ end_it:
 				rc = SLURM_SUCCESS;
 			}
 		}
+		if(moved_parent) {
+			List assoc_list = NULL;
+			ListIterator itr = NULL;
+			acct_association_rec_t *assoc = NULL;
+			//acct_association_cond_t assoc_cond;
+			/* now we need to send the update of the new parents and
+			 * limits, so just to be safe, send the whole
+			 * tree because we could have some limits that
+			 * were affected but not noticed.
+			 */
+			/* we can probably just look at the mod time now but
+			 * we will have to wait for the next revision number
+			 * since you can't query on mod time here and I don't
+			 * want to rewrite code to make it happen
+			 */
+			//bzero(&assoc_cond, sizeof(acct_association_cond_t));
+			
+			if(!(assoc_list = 
+			     acct_storage_p_get_associations(mysql_conn,
+							     uid, NULL)))
+				return rc;
+			/* NOTE: you cannot use list_pop or list_push
+			   anywhere either, since mysql
+			   exports something of the same name as a macro,
+			   which messes everything up (my_list.h is
+			   the bad boy).
+			   So we are just going to delete each item as it
+			   comes out, since we are moving it to the update_list.
+			*/
+			itr = list_iterator_create(assoc_list);
+			while((assoc = list_next(itr))) {
+				if(_addto_update_list(mysql_conn->update_list, 
+						      ACCT_MODIFY_ASSOC,
+						      assoc) == SLURM_SUCCESS) 
+					list_remove(itr);
+			}
+			list_iterator_destroy(itr);
+			list_destroy(assoc_list);
+		}
 	} else {
 		xfree(txn_query);
 		if(mysql_conn->rollback) {
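The NOTE above rules out list_pop/list_push because of the mysql header clash and instead removes each element as the iterator returns it. The same pattern on a plain hand-rolled singly-linked list (SLURM's List API is deliberately not reproduced here, to avoid guessing at its exact signatures) looks roughly like this:

/* Remove-as-you-iterate, shown on a plain singly-linked list.  This only
 * illustrates the pattern the NOTE describes; it does not use SLURM's
 * List/ListIterator API. */
#include <stdio.h>
#include <stdlib.h>

struct item { int id; struct item *next; };

int main(void)
{
	struct item *fetched = NULL, *update_list = NULL, **cur;
	int i;

	/* build a small "fetched associations" list: ids 1..4 */
	for (i = 4; i >= 1; i--) {
		struct item *it = malloc(sizeof(*it));
		it->id = i;
		it->next = fetched;
		fetched = it;
	}

	/* single pass: unlink each node from the source list and hand it
	 * to the update list -- no pop/push helpers needed */
	cur = &fetched;
	while (*cur) {
		struct item *it = *cur;
		*cur = it->next;
		it->next = update_list;
		update_list = it;
	}

	while (update_list) {
		struct item *it = update_list;
		printf("queued update for assoc %d\n", it->id);
		update_list = it->next;
		free(it);
	}
	return 0;
}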
@@ -2801,9 +3576,6 @@ end_it:
 		}
 		list_flush(mysql_conn->update_list);
 	}
-
-	xfree(old_parent);
-	xfree(old_cluster);
 					
 	return rc;
 #else
@@ -2818,7 +3590,8 @@ extern int acct_storage_p_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	acct_qos_rec_t *object = NULL;
-	char *query = NULL;
+	char *cols = NULL, *extra = NULL, *vals = NULL, *query = NULL,
+		*tmp_extra = NULL;
 	time_t now = time(NULL);
 	char *user_name = NULL;
 	int affect_rows = 0;
@@ -2835,22 +3608,30 @@ extern int acct_storage_p_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 			rc = SLURM_ERROR;
 			continue;
 		}
+		xstrcat(cols, "creation_time, mod_time, name");
+		xstrfmtcat(vals, "%d, %d, \"%s\"", 
+			   now, now, object->name); 
+		xstrfmtcat(extra, ", mod_time=%d", now);
 
+		_setup_qos_limits(object, &cols, &vals, &extra, 
+				  QOS_LEVEL_NONE);
 		xstrfmtcat(query, 
-			   "insert into %s (creation_time, mod_time, "
-			   "name, description) "
-			   "values (%d, %d, '%s', '%s') "
-			   "on duplicate key update deleted=0, mod_time=%d;",
-			   qos_table, 
-			   now, now, object->name, object->description,
-			   now);
+			   "insert into %s (%s) values (%s) "
+			   "on duplicate key update deleted=0, "
+			   "id=LAST_INSERT_ID(id)%s;",
+			   qos_table, cols, vals, extra);
+
+
 		debug3("%d(%d) query\n%s",
 		       mysql_conn->conn, __LINE__, query);
-		rc = mysql_db_query(mysql_conn->db_conn, query);
+		object->id = mysql_insert_ret_id(mysql_conn->db_conn, query);
 		xfree(query);
-		if(rc != SLURM_SUCCESS) {
+		if(!object->id) {
 			error("Couldn't add qos %s", object->name);
 			added=0;
+			xfree(cols);
+			xfree(extra);
+			xfree(vals);
 			break;
 		}
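The new add_qos code relies on a MySQL idiom: adding `id=LAST_INSERT_ID(id)` to the ON DUPLICATE KEY UPDATE clause makes the server report the existing row's id through the insert-id mechanism, so `mysql_insert_ret_id()` returns a usable QOS id whether the row was freshly inserted or merely revived (deleted=0). A sketch of the statement shape, with placeholder values and without the plugin's wrappers:

/* Sketch of the upsert-and-return-id statement built above.  Values and
 * table name are placeholders; mysql_insert_ret_id() is the plugin's own
 * wrapper and is assumed to report mysql_insert_id() after the query. */
#include <stdio.h>

int main(void)
{
	const char *qos_table = "qos_table";
	const char *cols  = "creation_time, mod_time, name";
	const char *vals  = "1220900000, 1220900000, \"standby\"";
	const char *extra = ", mod_time=1220900000, description=\"test qos\"";
	char query[512];

	/* id=LAST_INSERT_ID(id) makes the server hand back the id of the
	 * already-existing row on a duplicate-key hit, so the caller gets a
	 * valid QOS id whether the row was new or merely un-deleted. */
	snprintf(query, sizeof(query),
		 "insert into %s (%s) values (%s) "
		 "on duplicate key update deleted=0, "
		 "id=LAST_INSERT_ID(id)%s;",
		 qos_table, cols, vals, extra);
	printf("%s\n", query);
	return 0;
}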
 
@@ -2858,16 +3639,29 @@ extern int acct_storage_p_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		if(!affect_rows) {
 			debug2("nothing changed %d", affect_rows);
+			xfree(cols);
+			xfree(extra);
+			xfree(vals);
 			continue;
 		}
+		/* FIXME: we have to edit all the other QOSes to set
+		   their preemptee or preemptor lists based on what is here.
+		*/
+		/* we always have a ', ' as the first 2 chars */
+		tmp_extra = _fix_double_quotes(extra+2);
+
 		xstrfmtcat(query,
 			   "insert into %s "
 			   "(timestamp, action, name, actor, info) "
-			   "values (%d, %u, '%s', '%s', \"%s\");",
+			   "values (%d, %u, \"%s\", \"%s\", \"%s\");",
 			   txn_table,
 			   now, DBD_ADD_QOS, object->name, user_name,
-			   object->description);
+			   tmp_extra);
 
+		xfree(tmp_extra);
+		xfree(cols);
+		xfree(extra);
+		xfree(vals);
 		debug4("query\n%s",query);
 		rc = mysql_db_query(mysql_conn->db_conn, query);
 		xfree(query);
@@ -2913,9 +3707,8 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	int set = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	int replace_qos = 0;
 
-	if(!user_cond) {
+	if(!user_cond || !user) {
 		error("we need something to change");
 		return NULL;
 	}
@@ -2932,7 +3725,7 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -2946,65 +3739,19 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "default_acct='%s'", object);
+			xstrfmtcat(extra, "default_acct=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
 	
-	if(user_cond->qos_list && list_count(user_cond->qos_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(user_cond->qos_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, 
-				   "(qos like '%%,%s' || qos like '%%,%s,%%')",
-				   object, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
 	if(user_cond->admin_level != ACCT_ADMIN_NOTSET) {
 		xstrfmtcat(extra, " && admin_level=%u", user_cond->admin_level);
 	}
 
 	if(user->default_acct)
-		xstrfmtcat(vals, ", default_acct='%s'", user->default_acct);
-
-	if(user->qos_list && list_count(user->qos_list)) {
-		char *tmp_qos = NULL;
-		set = 0;
-		itr = list_iterator_create(user->qos_list);
-		while((object = list_next(itr))) {
-			/* when adding we need to make sure we don't
-			 * already have it so we remove it and then add
-			 * it.
-			 */
-			if(object[0] == '-') {
-				xstrfmtcat(vals,
-					   ", qos=replace(qos, ',%s', '')",
-					   object+1);
-			} else if(object[0] == '+') {
-				xstrfmtcat(vals,
-					   ", qos=concat_ws(',', "
-					   "replace(qos, ',%s', ''), '%s')",
-					   object+1, object+1);
-			} else {
-				xstrfmtcat(tmp_qos, ",%s", object);
-			}
-		}
-		list_iterator_destroy(itr);
-		if(tmp_qos) {
-			xstrfmtcat(vals, ", qos='%s'", tmp_qos);
-			xfree(tmp_qos);
-			replace_qos = 1;
-		}
-	}
+		xstrfmtcat(vals, ", default_acct=\"%s\"", user->default_acct);
 
 	if(user->admin_level != ACCT_ADMIN_NOTSET)
 		xstrfmtcat(vals, ", admin_level=%u", user->admin_level);
@@ -3014,7 +3761,7 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		error("Nothing to change");
 		return NULL;
 	}
-	query = xstrdup_printf("select name, qos from %s %s;",
+	query = xstrdup_printf("select name from %s %s;",
 			       user_table, extra);
 	xfree(extra);
 	if(!(result = mysql_db_query_ret(
@@ -3031,65 +3778,15 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		object = xstrdup(row[0]);
 		list_append(ret_list, object);
 		if(!rc) {
-			xstrfmtcat(name_char, "(name='%s'", object);
+			xstrfmtcat(name_char, "(name=\"%s\"", object);
 			rc = 1;
 		} else  {
-			xstrfmtcat(name_char, " || name='%s'", object);
+			xstrfmtcat(name_char, " || name=\"%s\"", object);
 		}
 		user_rec = xmalloc(sizeof(acct_user_rec_t));
 		user_rec->name = xstrdup(object);
 		user_rec->default_acct = xstrdup(user->default_acct);
 		user_rec->admin_level = user->admin_level;
-		if(user->qos_list) {
-			ListIterator new_qos_itr = 
-				list_iterator_create(user->qos_list);
-			ListIterator curr_qos_itr = NULL;
-			char *new_qos = NULL, *curr_qos = NULL;
-
-			user_rec->qos_list = list_create(slurm_destroy_char);
-			if(!replace_qos)
-				slurm_addto_char_list(user_rec->qos_list,
-						      row[1]);
-			curr_qos_itr = list_iterator_create(user_rec->qos_list);
-
-			while((new_qos = list_next(new_qos_itr))) {
-				char *tmp_char = NULL;
-				if(new_qos[0] == '-') {
-					tmp_char = xstrdup(object+1);
-					while((curr_qos =
-					       list_next(curr_qos_itr))) {
-						if(!strcmp(curr_qos,
-							   tmp_char)) {
-							list_delete_item(
-								curr_qos_itr);
-							break;
-						}
-					}
-					xfree(tmp_char);
-					list_iterator_reset(curr_qos_itr);
-				} else if(new_qos[0] == '+') {
-					tmp_char = xstrdup(object+1);
-					while((curr_qos =
-					       list_next(curr_qos_itr))) {
-						if(!strcmp(curr_qos,
-							   tmp_char)) {
-							break;
-						}
-					}
-					if(!curr_qos)
-						list_append(user_rec->qos_list,
-							    tmp_char);
-					else
-						xfree(tmp_char);
-					list_iterator_reset(curr_qos_itr);
-				} else {
-					list_append(user_rec->qos_list,
-						    xstrdup(object));
-				}
-			}
-			list_iterator_destroy(curr_qos_itr);
-			list_iterator_destroy(new_qos_itr);			
-		}
 		_addto_update_list(mysql_conn->update_list, ACCT_MODIFY_USER,
 				   user_rec);
 	}
@@ -3140,7 +3837,7 @@ extern List acct_storage_p_modify_accounts(
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 
-	if(!acct_cond) {
+	if(!acct_cond || !acct) {
 		error("we need something to change");
 		return NULL;
 	}
@@ -3158,7 +3855,7 @@ extern List acct_storage_p_modify_accounts(
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -3173,7 +3870,7 @@ extern List acct_storage_p_modify_accounts(
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "description='%s'", object);
+			xstrfmtcat(extra, "description=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -3188,62 +3885,17 @@ extern List acct_storage_p_modify_accounts(
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "organization='%s'", object);
+			xstrfmtcat(extra, "organization=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
 	
-	if(acct_cond->qos_list && list_count(acct_cond->qos_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(acct_cond->qos_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, 
-				   "(qos like '%%,%s' || qos like '%%,%s,%%')",
-				   object, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
 	if(acct->description)
-		xstrfmtcat(vals, ", description='%s'", acct->description);
+		xstrfmtcat(vals, ", description=\"%s\"", acct->description);
 	if(acct->organization)
-		xstrfmtcat(vals, ", organization='%s'", acct->organization);
-
-	if(acct->qos_list && list_count(acct->qos_list)) {
-		char *tmp_qos = NULL;
-		set = 0;
-		itr = list_iterator_create(acct->qos_list);
-		while((object = list_next(itr))) {
-			/* when adding we need to make sure we don't
-			 * already have it so we remove it and then add
-			 * it.
-			 */
-			if(object[0] == '-') {
-				xstrfmtcat(vals,
-					   ", qos=replace(qos, ',%s', '')",
-					   object+1);
-			} else if(object[0] == '+') {
-				xstrfmtcat(vals,
-					   ", qos=concat_ws(',', "
-					   "replace(qos, ',%s', ''), '%s')",
-					   object+1, object+1);
-			} else {
-				xstrfmtcat(tmp_qos, ",%s", object);
-			}
-		}
-		list_iterator_destroy(itr);
-		if(tmp_qos) {
-			xstrfmtcat(vals, ", qos='%s'", tmp_qos);
-			xfree(tmp_qos);
-		}
-	}
+		xstrfmtcat(vals, ", organization=\"%s\"", acct->organization);
 
 	if(!extra || !vals) {
 		errno = SLURM_NO_CHANGE_IN_DATA;
@@ -3267,10 +3919,10 @@ extern List acct_storage_p_modify_accounts(
 		object = xstrdup(row[0]);
 		list_append(ret_list, object);
 		if(!rc) {
-			xstrfmtcat(name_char, "(name='%s'", object);
+			xstrfmtcat(name_char, "(name=\"%s\"", object);
 			rc = 1;
 		} else  {
-			xstrfmtcat(name_char, " || name='%s'", object);
+			xstrfmtcat(name_char, " || name=\"%s\"", object);
 		}
 
 	}
@@ -3329,7 +3981,7 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 	 * the controller when it loads 
 	 */
 
-	if(!cluster_cond) {
+	if(!cluster_cond || !cluster) {
 		error("we need something to change");
 		return NULL;
 	}
@@ -3353,21 +4005,40 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		xstrcat(extra, ")");
 	}
 
-		
+	set = 0;
 	if(cluster->control_host) {
 		xstrfmtcat(vals, ", control_host='%s'", cluster->control_host);
+		set++;
 	}
+
 	if(cluster->control_port) {
 		xstrfmtcat(vals, ", control_port=%u", cluster->control_port);
+		set++;
+	}
+
+	if(cluster->rpc_version) {
+		xstrfmtcat(vals, ", rpc_version=%u", cluster->rpc_version);
+		set++;
 	}
 
 	if(!vals) {
+		xfree(extra);
 		errno = SLURM_NO_CHANGE_IN_DATA;
 		error("Nothing to change");
 		return NULL;
+	} else if(set != 3) {
+		xfree(vals);
+		xfree(extra);
+		errno = EFAULT;
+		error("Need control host, port and rpc version "
+		      "to register a cluster");
+		return NULL;
 	}
 
-	xstrfmtcat(query, "select name from %s %s;", cluster_table, extra);
+
+	xstrfmtcat(query, "select name, control_port from %s %s;",
+		   cluster_table, extra);
+
 	xfree(extra);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	if(!(result = mysql_db_query_ret(
@@ -3377,11 +4048,21 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		error("no result given for %s", extra);
 		return NULL;
 	}
-	
+
+	/* "set" is used here to ask for jobs and nodes in anything
+	 * other than the up state, so if you reset it later make sure
+	 * this is accounted for before you do.
+	 */
+	set = 1;
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
 	while((row = mysql_fetch_row(result))) {
 		object = xstrdup(row[0]);
+
+		/* check to see if this is the first time to register */
+		if(row[1][0] == '0')
+			set = 0;
+
 		list_append(ret_list, object);
 		if(!rc) {
 			xstrfmtcat(name_char, "name='%s'", object);
@@ -3415,6 +4096,40 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		}
 	}
 
+	/* Get all nodes in a down state and jobs pending or running.
+	 * This is for the first time a cluster registers
+	 */
+
+	if(!set && slurmdbd_conf) {
+		/* This only happens here with the slurmdbd.  If
+		 * calling this plugin directly we do this in
+		 * clusteracct_storage_p_cluster_procs.
+		 */
+		slurm_addr ctld_address;
+		slurm_fd fd;
+
+		info("First time to register cluster requesting "
+		     "running jobs and system information.");
+
+		slurm_set_addr_char(&ctld_address, cluster->control_port,
+				    cluster->control_host);
+		fd =  slurm_open_msg_conn(&ctld_address);
+		if (fd < 0) {
+			error("can not open socket back to slurmctld");
+		} else {
+			slurm_msg_t out_msg;
+			slurm_msg_t_init(&out_msg);
+			out_msg.msg_type = ACCOUNTING_FIRST_REG;
+			out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
+			slurm_send_node_msg(fd, &out_msg);
+			/* We probably need to add a matching recv_msg function
+			 * for an arbitrary fd, or should these be fire
+			 * and forget?  For this one, fire and forget is
+			 * probably fine. */
+			slurm_close_stream(fd);
+		}
+	}
+
 end_it:
 	xfree(name_char);
 	xfree(assoc_char);
@@ -3444,6 +4159,9 @@ extern List acct_storage_p_modify_associations(
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	acct_user_rec_t user;
+	char *tmp_char1=NULL, *tmp_char2=NULL;
+	int set_qos_vals = 0;
+	int moved_parent = 0;
 
 	char *massoc_req_inx[] = {
 		"id",
@@ -3453,7 +4171,8 @@ extern List acct_storage_p_modify_associations(
 		"user",
 		"partition",
 		"lft",
-		"rgt"
+		"rgt",
+		"qos",
 	};
 	
 	enum {
@@ -3465,10 +4184,11 @@ extern List acct_storage_p_modify_associations(
 		MASSOC_PART,
 		MASSOC_LFT,
 		MASSOC_RGT,
+		MASSOC_QOS,
 		MASSOC_COUNT
 	};
 
-	if(!assoc_cond) {
+	if(!assoc_cond || !assoc) {
 		error("we need something to change");
 		return NULL;
 	}
@@ -3515,118 +4235,26 @@ extern List acct_storage_p_modify_associations(
 		is_admin = 1;
 	}
 
-	if(assoc_cond->acct_list && list_count(assoc_cond->acct_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->acct_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "acct='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
-	if(assoc_cond->cluster_list && list_count(assoc_cond->cluster_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->cluster_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "cluster='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
+	set = _setup_association_cond_limits(assoc_cond, &extra);
 
-	if(assoc_cond->user_list && list_count(assoc_cond->user_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->user_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "user='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
+	/* This needs to be here to make sure we only modify the
+	   correct set of associations.  The first clause was already
+	   taken care of above. */
+	if (assoc_cond->user_list && !list_count(assoc_cond->user_list)) {
+		debug4("no user specified looking at users");
+		xstrcat(extra, " && user != '' ");
 	} else if (!assoc_cond->user_list) {
 		debug4("no user specified looking at accounts");
 		xstrcat(extra, " && user = '' ");
-	} else {
-		debug4("no user specified looking at users");
-		xstrcat(extra, " && user != '' ");
-	}
+	} 
 
-	if(assoc_cond->partition_list 
-	   && list_count(assoc_cond->partition_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->partition_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "partition='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
+	_setup_association_limits(assoc, &tmp_char1, &tmp_char2,
+				  &vals, QOS_LEVEL_MODIFY, 0);
+	xfree(tmp_char1);
+	xfree(tmp_char2);
 
-	if(assoc_cond->id_list && list_count(assoc_cond->id_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->id_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "id=%s", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-	
-	if(assoc_cond->parent_acct) {
-		xstrfmtcat(extra, " && parent_acct='%s'",
-			   assoc_cond->parent_acct);
-	}
-
-	if((int)assoc->fairshare >= 0) 
-		xstrfmtcat(vals, ", fairshare=%u", assoc->fairshare);
-	else if((int)assoc->fairshare == INFINITE) {
-		xstrfmtcat(vals, ", fairshare=1");
-		assoc->fairshare = 1;
-	}
-	if((int)assoc->max_cpu_secs_per_job >= 0) 
-		xstrfmtcat(vals, ", max_cpu_secs_per_job=%u",
-			   assoc->max_cpu_secs_per_job);
-	else if((int)assoc->max_cpu_secs_per_job == INFINITE) {
-		xstrfmtcat(vals, ", max_cpu_secs_per_job=NULL");
-	}
-	if((int)assoc->max_jobs >= 0) 
-		xstrfmtcat(vals, ", max_jobs=%u", assoc->max_jobs);
-	else if((int)assoc->max_jobs == INFINITE) {
-		xstrfmtcat(vals, ", max_jobs=NULL");
-	}
-	if((int)assoc->max_nodes_per_job >= 0) 
-		xstrfmtcat(vals, ", max_nodes_per_job=%u",
-			   assoc->max_nodes_per_job);
-	else if((int)assoc->max_nodes_per_job == INFINITE) {
-		xstrfmtcat(vals, ", max_nodes_per_job=NULL");
-	}
-	if((int)assoc->max_wall_duration_per_job >= 0) 
-		xstrfmtcat(vals, ", max_wall_duration_per_job=%u",
-			   assoc->max_wall_duration_per_job);
-	else if((int)assoc->max_wall_duration_per_job == INFINITE) {
-		xstrfmtcat(vals, ", max_wall_duration_per_job=NULL");
-	}
-	if(!extra || (!vals && !assoc->parent_acct)) {
+	if(!extra || (!vals && !assoc->parent_acct
+		      && (!assoc->qos_list || !list_count(assoc->qos_list)))) {
 		errno = SLURM_NO_CHANGE_IN_DATA;
 		error("Nothing to change");
 		return NULL;
@@ -3635,10 +4263,10 @@ extern List acct_storage_p_modify_associations(
 	for(i=0; i<MASSOC_COUNT; i++) {
 		if(i) 
 			xstrcat(object, ", ");
-		xstrcat(object, massoc_req_inx[i]);
+		xstrfmtcat(object, "t1.%s", massoc_req_inx[i]);
 	}
 
-	query = xstrdup_printf("select distinct %s from %s where deleted=0%s "
+	query = xstrdup_printf("select distinct %s from %s as t1%s "
 			       "order by lft FOR UPDATE;",
 			       object, assoc_table, extra);
 	xfree(object);
@@ -3659,8 +4287,6 @@ extern List acct_storage_p_modify_associations(
 	while((row = mysql_fetch_row(result))) {
 		acct_association_rec_t *mod_assoc = NULL;
 		int account_type=0;
-/* 		MYSQL_RES *result2 = NULL; */
-/* 		MYSQL_ROW row2; */
 
 		if(!is_admin) {
 			acct_coord_rec_t *coord = NULL;
@@ -3705,7 +4331,7 @@ extern List acct_storage_p_modify_associations(
 					error("User %s(%d) can not modify "
 					      "account (%s) because they "
 					      "are not coordinators of "
-					      "parent account '%s'.",
+					      "parent account \"%s\".",
 					      user.name, user.uid,
 					      row[MASSOC_ACCT], 
 					      row[MASSOC_PACCT]);
@@ -3765,94 +4391,535 @@ extern List acct_storage_p_modify_associations(
 						row[MASSOC_CLUSTER],
 						row[MASSOC_ID],
 						row[MASSOC_PACCT],
-						assoc->parent_acct)
+						assoc->parent_acct,
+						now)
 				   == SLURM_ERROR)
 					break;
+				moved_parent = 1;
 			}
 			account_type = 1;
 		}
 		list_append(ret_list, object);
-
-		if(!set) {
-			xstrfmtcat(name_char, "(id=%s", row[MASSOC_ID]);
-			set = 1;
-		} else {
-			xstrfmtcat(name_char, " || id=%s", row[MASSOC_ID]);
+	
+		if(!set) {
+			xstrfmtcat(name_char, "(id=%s", row[MASSOC_ID]);
+			set = 1;
+		} else {
+			xstrfmtcat(name_char, " || id=%s", row[MASSOC_ID]);
+		}
+
+		mod_assoc = xmalloc(sizeof(acct_association_rec_t));
+		init_acct_association_rec(mod_assoc);
+		mod_assoc->id = atoi(row[MASSOC_ID]);
+
+		mod_assoc->fairshare = assoc->fairshare;
+
+		mod_assoc->grp_cpus = assoc->grp_cpus;
+		mod_assoc->grp_cpu_mins = assoc->grp_cpu_mins;
+		mod_assoc->grp_jobs = assoc->grp_jobs;
+		mod_assoc->grp_nodes = assoc->grp_nodes;
+		mod_assoc->grp_submit_jobs = assoc->grp_submit_jobs;
+		mod_assoc->grp_wall = assoc->grp_wall;
+
+		mod_assoc->max_cpus_pj = assoc->max_cpus_pj;
+		mod_assoc->max_cpu_mins_pj = assoc->max_cpu_mins_pj;
+		mod_assoc->max_jobs = assoc->max_jobs;
+		mod_assoc->max_nodes_pj = assoc->max_nodes_pj;
+		mod_assoc->max_submit_jobs = assoc->max_submit_jobs;
+		mod_assoc->max_wall_pj = assoc->max_wall_pj;
+
+		if(!row[MASSOC_USER][0])
+			mod_assoc->parent_acct = xstrdup(assoc->parent_acct);
+		if(assoc->qos_list && list_count(assoc->qos_list)) {
+			ListIterator new_qos_itr = 
+				list_iterator_create(assoc->qos_list);
+			char *new_qos = NULL, *tmp_qos = NULL;
+		
+			mod_assoc->qos_list = list_create(slurm_destroy_char);
+			
+			while((new_qos = list_next(new_qos_itr))) {
+				if(new_qos[0] == '-' || new_qos[0] == '+') {
+					list_append(mod_assoc->qos_list,
+						    xstrdup(new_qos));
+				} else if(new_qos[0]) {
+					list_append(mod_assoc->qos_list,
+						    xstrdup_printf("=%s",
+								   new_qos));
+				}	
+
+				if(set_qos_vals)
+					continue;
+				/* Now we can set up the values and
+				   make sure we aren't overwriting
+				   things that really come from the
+				   parent.
+				*/
+				if(new_qos[0] == '-') {
+					xstrfmtcat(vals,
+						   ", qos=if(qos='', '', "
+						   "replace(qos, ',%s', ''))"
+						   ", delta_qos=if(qos='', "
+						   "concat(replace(delta_qos, "
+						   "',%s', ''), ',%s'), '')",
+						   new_qos+1, new_qos, new_qos);
+				} else if(new_qos[0] == '+') {
+					xstrfmtcat(vals,
+						   ", qos=if(qos='', '', "
+						   "concat_ws(',', "
+						   "replace(qos, ',%s', ''), "
+						   "\"%s\")), delta_qos=if("
+						   "qos='', concat("
+						   "replace(delta_qos, "
+						   "',%s', ''), ',%s'), '')",
+						   new_qos+1, new_qos+1,
+						   new_qos, new_qos);
+				} else if(new_qos[0]) 
+					xstrfmtcat(tmp_qos, ",%s", new_qos);
+				else
+					xstrcat(tmp_qos, "");
+					
+			}
+			list_iterator_destroy(new_qos_itr);
+
+			if(!set_qos_vals && tmp_qos) 
+				xstrfmtcat(vals, ", qos='%s', delta_qos=''",
+					   tmp_qos);	
+			xfree(tmp_qos);
+
+			set_qos_vals=1;
+		}
+
+		if(_addto_update_list(mysql_conn->update_list, 
+				      ACCT_MODIFY_ASSOC,
+				      mod_assoc) != SLURM_SUCCESS) 
+			error("couldn't add to the update list");
+		if(account_type) {
+			_modify_unset_users(mysql_conn,
+					    mod_assoc,
+					    row[MASSOC_ACCT],
+					    atoi(row[MASSOC_LFT]),
+					    atoi(row[MASSOC_RGT]),
+					    ret_list,
+					    moved_parent);
+		}
+	}
+	mysql_free_result(result);
+
+	if(assoc->parent_acct) {
+		if(rc != SLURM_SUCCESS) {
+			if(mysql_conn->rollback) {
+				mysql_db_rollback(mysql_conn->db_conn);
+			}
+			list_flush(mysql_conn->update_list);
+			list_destroy(ret_list);
+			xfree(vals);
+			errno = rc;
+			return NULL;
+		}
+	}
+
+
+	if(!list_count(ret_list)) {
+		if(mysql_conn->rollback) {
+			mysql_db_rollback(mysql_conn->db_conn);
+		}
+		errno = SLURM_NO_CHANGE_IN_DATA;
+		debug3("didn't effect anything");
+		xfree(vals);
+		return ret_list;
+	}
+	xstrcat(name_char, ")");
+
+	if(vals) {
+		user_name = uid_to_string((uid_t) uid);
+		rc = _modify_common(mysql_conn, DBD_MODIFY_ASSOCS, now,
+				    user_name, assoc_table, name_char, vals);
+		xfree(user_name);
+		if (rc == SLURM_ERROR) {
+			if(mysql_conn->rollback) {
+				mysql_db_rollback(mysql_conn->db_conn);
+			}
+			list_flush(mysql_conn->update_list);
+			error("Couldn't modify associations");
+			list_destroy(ret_list);
+			ret_list = NULL;
+			goto end_it;
+		}
+	}
+	if(moved_parent) {
+		List local_assoc_list = NULL;
+		ListIterator local_itr = NULL;
+		acct_association_rec_t *local_assoc = NULL;
+		//acct_association_cond_t local_assoc_cond;
+		/* now we need to send the update of the new parents and
+		 * limits, so just to be safe, send the whole
+		 * tree because we could have some limits that
+		 * were affected but not noticed.
+		 */
+		/* we can probably just look at the mod time now but
+		 * we will have to wait for the next revision number
+		 * since you can't query on mod time here and I don't
+		 * want to rewrite code to make it happen
+		 */
+
+		//bzero(&local_assoc_cond, sizeof(acct_association_cond_t));
+		
+		if(!(local_assoc_list = 
+		     acct_storage_p_get_associations(mysql_conn,
+						     uid, NULL)))
+			return ret_list;
+		/* NOTE: you cannot use list_pop or list_push
+		   anywhere either, since mysql
+		   exports something of the same name as a macro,
+		   which messes everything up (my_list.h is
+		   the bad boy).
+		   So we are just going to delete each item as it
+		   comes out, since we are moving it to the update_list.
+		*/
+		local_itr = list_iterator_create(local_assoc_list);
+		while((local_assoc = list_next(local_itr))) {
+			if(_addto_update_list(mysql_conn->update_list, 
+					      ACCT_MODIFY_ASSOC,
+					      local_assoc) == SLURM_SUCCESS) 
+				list_remove(local_itr);
+		}
+		list_iterator_destroy(local_itr);
+		list_destroy(local_assoc_list);		
+	}
+	
+end_it:
+	xfree(name_char);
+	xfree(vals);
+
+	return ret_list;
+#else
+	return NULL;
+#endif
+}
+
+extern List acct_storage_p_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid, 
+					acct_qos_cond_t *qos_cond,
+					acct_qos_rec_t *qos)
+{
+#ifdef HAVE_MYSQL
+	ListIterator itr = NULL;
+	List ret_list = NULL;
+	int rc = SLURM_SUCCESS;
+	char *object = NULL;
+	char *vals = NULL, *extra = NULL, *query = NULL, *name_char = NULL;
+	time_t now = time(NULL);
+	char *user_name = NULL;
+	int set = 0;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	char *tmp_char1=NULL, *tmp_char2=NULL;
+	int replace_preemptor = 0, replace_preemptee = 0;
+
+	if(!qos_cond || !qos) {
+		error("we need something to change");
+		return NULL;
+	}
+
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
+		return NULL;
+
+	xstrcat(extra, "where deleted=0");
+	
+	if(qos_cond->description_list 
+	   && list_count(qos_cond->description_list)) {
+		set = 0;
+		xstrcat(extra, " && (");
+		itr = list_iterator_create(qos_cond->description_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "description=\"%s\"", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
+	if(qos_cond->id_list 
+	   && list_count(qos_cond->id_list)) {
+		set = 0;
+		xstrcat(extra, " && (");
+		itr = list_iterator_create(qos_cond->id_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "id=\"%s\"", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+	
+	if(qos_cond->name_list
+	   && list_count(qos_cond->name_list)) {
+		set = 0;
+		xstrcat(extra, " && (");
+		itr = list_iterator_create(qos_cond->name_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "name=\"%s\"", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+	
+	_setup_qos_limits(qos, &tmp_char1, &tmp_char2,
+			  &vals, QOS_LEVEL_MODIFY);
+	xfree(tmp_char1);
+	xfree(tmp_char2);
+
+	if(qos->preemptee_list && list_count(qos->preemptee_list)) {
+		char *tmp_qos = NULL;
+		set = 0;
+		itr = list_iterator_create(qos->preemptee_list);
+		while((object = list_next(itr))) {
+			/* when adding we need to make sure we don't
+			 * already have it, so we remove it and then add
+			 * it.
+			 */
+			if(object[0] == '-') {
+				xstrfmtcat(vals,
+					   ", preemptees="
+					   "replace(qos, ',%s', '')",
+					   object+1);
+			} else if(object[0] == '+') {
+				xstrfmtcat(vals,
+					   ", preemptees=concat_ws(',', "
+					   "replace(preemptees, ',%s', ''), "
+					   "\"%s\")",
+					   object+1, object+1);
+			} else {
+				xstrfmtcat(tmp_qos, ",%s", object);
+			}
+		}
+		list_iterator_destroy(itr);
+		if(tmp_qos) {
+			xstrfmtcat(vals, ", preemptees='%s'", tmp_qos);
+			xfree(tmp_qos);
+			replace_preemptee = 1;
+		}
+	}
+
+	if(qos->preemptor_list && list_count(qos->preemptor_list)) {
+		char *tmp_qos = NULL;
+		set = 0;
+		itr = list_iterator_create(qos->preemptor_list);
+		while((object = list_next(itr))) {
+			/* when adding we need to make sure we don't
+			 * already have it, so we remove it and then add
+			 * it.
+			 */
+			if(object[0] == '-') {
+				xstrfmtcat(vals,
+					   ", preemptors="
+					   "replace(qos, ',%s', '')",
+					   object+1);
+			} else if(object[0] == '+') {
+				xstrfmtcat(vals,
+					   ", preemptors=concat_ws(',', "
+					   "replace(preemptors, ',%s', ''), "
+					   "\"%s\")",
+					   object+1, object+1);
+			} else {
+				xstrfmtcat(tmp_qos, ",%s", object);
+			}
+		}
+		list_iterator_destroy(itr);
+		if(tmp_qos) {
+			xstrfmtcat(vals, ", preemptors='%s'", tmp_qos);
+			xfree(tmp_qos);
+			replace_preemptor = 1;
+		}
+	}
+
+	if(!extra || !vals) {
+		errno = SLURM_NO_CHANGE_IN_DATA;
+		error("Nothing to change");
+		return NULL;
+	}
+	query = xstrdup_printf("select name, preemptees, preemptors "
+			       "from %s %s;", qos_table, extra);
+	xfree(extra);
+	if(!(result = mysql_db_query_ret(
+		     mysql_conn->db_conn, query, 0))) {
+		xfree(query);
+		return NULL;
+	}
+
+	rc = 0;
+	ret_list = list_create(slurm_destroy_char);
+	while((row = mysql_fetch_row(result))) {
+		acct_qos_rec_t *qos_rec = NULL;
+		
+		object = xstrdup(row[0]);
+		list_append(ret_list, object);
+		if(!rc) {
+			xstrfmtcat(name_char, "(name='%s'", object);
+			rc = 1;
+		} else  {
+			xstrfmtcat(name_char, " || name='%s'", object);
 		}
-		
-		mod_assoc = xmalloc(sizeof(acct_association_rec_t));
-		mod_assoc->id = atoi(row[MASSOC_ID]);
+		qos_rec = xmalloc(sizeof(acct_qos_rec_t));
+		qos_rec->name = xstrdup(object);
 
-		mod_assoc->max_cpu_secs_per_job = assoc->max_cpu_secs_per_job;
-		mod_assoc->fairshare = assoc->fairshare;
-		mod_assoc->max_jobs = assoc->max_jobs;
-		mod_assoc->max_nodes_per_job = assoc->max_nodes_per_job;
-		mod_assoc->max_wall_duration_per_job = 
-			assoc->max_wall_duration_per_job;
-		if(!row[MASSOC_USER][0])
-			mod_assoc->parent_acct = xstrdup(assoc->parent_acct);
+		qos_rec->grp_cpus = qos->grp_cpus;
+		qos_rec->grp_cpu_mins = qos->grp_cpu_mins;
+		qos_rec->grp_jobs = qos->grp_jobs;
+		qos_rec->grp_nodes = qos->grp_nodes;
+		qos_rec->grp_submit_jobs = qos->grp_submit_jobs;
+		qos_rec->grp_wall = qos->grp_wall;
 
-		if(_addto_update_list(mysql_conn->update_list, 
-				      ACCT_MODIFY_ASSOC,
-				      mod_assoc) != SLURM_SUCCESS) 
-			error("couldn't add to the update list");
-		if(account_type) {
-			_modify_unset_users(mysql_conn,
-					    mod_assoc,
-					    row[MASSOC_ACCT],
-					    atoi(row[MASSOC_LFT]),
-					    atoi(row[MASSOC_RGT]),
-					    ret_list);
+		qos_rec->max_cpus_pu = qos->max_cpus_pu;
+		qos_rec->max_cpu_mins_pu = qos->max_cpu_mins_pu;
+		qos_rec->max_jobs_pu  = qos->max_jobs_pu;
+		qos_rec->max_nodes_pu = qos->max_nodes_pu;
+		qos_rec->max_submit_jobs_pu  = qos->max_submit_jobs_pu;
+		qos_rec->max_wall_pu = qos->max_wall_pu;
+
+		qos_rec->priority = qos->priority;
+
+		if(qos->preemptee_list) {
+			ListIterator new_qos_itr = 
+				list_iterator_create(qos->preemptee_list);
+			ListIterator curr_qos_itr = NULL;
+			char *new_qos = NULL, *curr_qos = NULL;
+
+			qos_rec->preemptee_list = 
+				list_create(slurm_destroy_char);
+			if(!replace_preemptee)
+				slurm_addto_char_list(qos_rec->preemptee_list,
+						      row[1]);
+			curr_qos_itr = 
+				list_iterator_create(qos_rec->preemptee_list);
+			
+			while((new_qos = list_next(new_qos_itr))) {
+				char *tmp_char = NULL;
+				if(new_qos[0] == '-') {
+					tmp_char = xstrdup(new_qos+1);
+					while((curr_qos =
+					       list_next(curr_qos_itr))) {
+						if(!strcmp(curr_qos,
+							   tmp_char)) {
+							list_delete_item(
+								curr_qos_itr);
+							break;
+						}
+					}
+					xfree(tmp_char);
+					list_iterator_reset(curr_qos_itr);
+				} else if(new_qos[0] == '+') {
+					tmp_char = xstrdup(new_qos+1);
+					while((curr_qos =
+					       list_next(curr_qos_itr))) {
+						if(!strcmp(curr_qos,
+							   tmp_char)) {
+							break;
+						}
+					}
+					if(!curr_qos)
+						list_append(
+							qos_rec->preemptee_list,
+							tmp_char);
+					else
+						xfree(tmp_char);
+					list_iterator_reset(curr_qos_itr);
+				} else {
+					list_append(qos_rec->preemptee_list,
+						    xstrdup(new_qos));
+				}
+			}
+			list_iterator_destroy(curr_qos_itr);
+			list_iterator_destroy(new_qos_itr);			
 		}
-	}
-	mysql_free_result(result);
 
-	if(assoc->parent_acct) {
-		if(rc != SLURM_SUCCESS) {
-			if(mysql_conn->rollback) {
-				mysql_db_rollback(mysql_conn->db_conn);
+		if(qos->preemptor_list) {
+			ListIterator new_qos_itr = 
+				list_iterator_create(qos->preemptor_list);
+			ListIterator curr_qos_itr = NULL;
+			char *new_qos = NULL, *curr_qos = NULL;
+
+			qos_rec->preemptor_list = 
+				list_create(slurm_destroy_char);
+			if(!replace_preemptor)
+				slurm_addto_char_list(qos_rec->preemptor_list,
+						      row[2]);
+			curr_qos_itr = 
+				list_iterator_create(qos_rec->preemptor_list);
+			
+			while((new_qos = list_next(new_qos_itr))) {
+				char *tmp_char = NULL;
+				if(new_qos[0] == '-') {
+					tmp_char = xstrdup(new_qos+1);
+					while((curr_qos =
+					       list_next(curr_qos_itr))) {
+						if(!strcmp(curr_qos,
+							   tmp_char)) {
+							list_delete_item(
+								curr_qos_itr);
+							break;
+						}
+					}
+					xfree(tmp_char);
+					list_iterator_reset(curr_qos_itr);
+				} else if(new_qos[0] == '+') {
+					tmp_char = xstrdup(new_qos+1);
+					while((curr_qos =
+					       list_next(curr_qos_itr))) {
+						if(!strcmp(curr_qos,
+							   tmp_char)) {
+							break;
+						}
+					}
+					if(!curr_qos)
+						list_append(
+							qos_rec->preemptor_list,
+							tmp_char);
+					else
+						xfree(tmp_char);
+					list_iterator_reset(curr_qos_itr);
+				} else {
+					list_append(qos_rec->preemptor_list,
+						    xstrdup(new_qos));
+				}
 			}
-			list_flush(mysql_conn->update_list);
-			list_destroy(ret_list);
-			xfree(vals);
-			errno = rc;
-			return NULL;
+			list_iterator_destroy(curr_qos_itr);
+			list_iterator_destroy(new_qos_itr);			
 		}
-	}
 
+		_addto_update_list(mysql_conn->update_list, ACCT_MODIFY_QOS,
+				   qos_rec);
+	}
+	mysql_free_result(result);
 
 	if(!list_count(ret_list)) {
-		if(mysql_conn->rollback) {
-			mysql_db_rollback(mysql_conn->db_conn);
-		}
 		errno = SLURM_NO_CHANGE_IN_DATA;
-		debug3("didn't effect anything");
+		debug3("didn't effect anything\n%s", query);
 		xfree(vals);
+		xfree(query);
 		return ret_list;
 	}
+	xfree(query);
 	xstrcat(name_char, ")");
 
-	if(vals) {
-		user_name = uid_to_string((uid_t) uid);
-		rc = _modify_common(mysql_conn, DBD_MODIFY_ASSOCS, now,
-				    user_name, assoc_table, name_char, vals);
-		xfree(user_name);
-		if (rc == SLURM_ERROR) {
-			if(mysql_conn->rollback) {
-				mysql_db_rollback(mysql_conn->db_conn);
-			}
-			list_flush(mysql_conn->update_list);
-			error("Couldn't modify associations");
-			list_destroy(ret_list);
-			ret_list = NULL;
-			goto end_it;
-		}
-	}
-
-end_it:
+	user_name = uid_to_string((uid_t) uid);
+	rc = _modify_common(mysql_conn, DBD_MODIFY_QOS, now,
+			    user_name, qos_table, name_char, vals);
+	xfree(user_name);
 	xfree(name_char);
 	xfree(vals);
-
+	if (rc == SLURM_ERROR) {
+		error("Couldn't modify qos");
+		list_destroy(ret_list);
+		ret_list = NULL;
+	}
+				
 	return ret_list;
 #else
 	return NULL;
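Both modify paths above encode incremental QOS changes as comma-separated names prefixed with '+' or '-', applied either directly to the stored string (the replace()/concat_ws() SQL) or recorded in delta_qos for later application over a parent's list. To make the intended semantics concrete, here is a stand-alone sketch (not the plugin's code) that applies one such delta string to a stored base list:

/* Applying a "+name"/"-name" delta list to a stored ",name,name" QOS
 * string.  The format and the add/remove semantics mirror what the
 * replace()/concat_ws() SQL above does; this is an illustration, not the
 * plugin's implementation. */
#include <stdio.h>
#include <string.h>

#define MAX_QOS  16
#define NAME_LEN 64

static int find(char names[][NAME_LEN], int n, const char *name)
{
	int i;
	for (i = 0; i < n; i++)
		if (!strcmp(names[i], name))
			return i;
	return -1;
}

int main(void)
{
	char base[]  = ",normal,expedite";	/* stored qos column */
	char delta[] = ",+standby,-expedite";	/* requested changes */
	char names[MAX_QOS][NAME_LEN];
	char *tok;
	int n = 0, i;

	/* split the stored list (the leading comma yields no empty token) */
	for (tok = strtok(base, ","); tok; tok = strtok(NULL, ","))
		strncpy(names[n++], tok, NAME_LEN - 1);

	/* apply each delta entry: '+' adds if missing, '-' removes */
	for (tok = strtok(delta, ","); tok; tok = strtok(NULL, ",")) {
		int idx = find(names, n, tok + 1);
		if (tok[0] == '+' && idx < 0)
			strncpy(names[n++], tok + 1, NAME_LEN - 1);
		else if (tok[0] == '-' && idx >= 0)
			names[idx][0] = '\0';	/* mark as removed */
	}

	printf("resulting qos:");
	for (i = 0; i < n; i++)
		if (names[i][0])
			printf(" %s", names[i]);
	printf("\n");	/* prints: resulting qos: normal standby */
	return 0;
}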
@@ -3865,6 +4932,7 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 #ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
+	List coord_list = NULL;
 	int rc = SLURM_SUCCESS;
 	char *object = NULL;
 	char *extra = NULL, *query = NULL,
@@ -3874,6 +4942,8 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	int set = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
+	acct_user_cond_t user_coord_cond;
+	acct_association_cond_t assoc_cond;
 
 	if(!user_cond) {
 		error("we need something to remove");
@@ -3893,7 +4963,7 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -3907,29 +4977,13 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "default_acct='%s'", object);
+			xstrfmtcat(extra, "default_acct=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
 	
-	if(user_cond->qos_list && list_count(user_cond->qos_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(user_cond->qos_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, 
-				   "(qos like '%%,%s' || qos like '%%,%s,%%')",
-				   object, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
 	if(user_cond->admin_level != ACCT_ADMIN_NOTSET) {
 		xstrfmtcat(extra, " && admin_level=%u", user_cond->admin_level);
 	}
@@ -3947,6 +5001,14 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		return NULL;
 	}
 
+	memset(&user_coord_cond, 0, sizeof(acct_user_cond_t));
+	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
+	/* we do not need to free the objects we put in here since
+	   they are also placed in a list that will be freed
+	*/
+	assoc_cond.user_list = list_create(NULL);
+	user_coord_cond.assoc_cond = &assoc_cond;
+
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
 	while((row = mysql_fetch_row(result))) {
@@ -3954,13 +5016,15 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		acct_user_rec_t *user_rec = NULL;
 		
 		list_append(ret_list, object);
+		list_append(assoc_cond.user_list, object);
+
 		if(!rc) {
-			xstrfmtcat(name_char, "name='%s'", object);
-			xstrfmtcat(assoc_char, "t2.user='%s'", object);
+			xstrfmtcat(name_char, "name=\"%s\"", object);
+			xstrfmtcat(assoc_char, "t2.user=\"%s\"", object);
 			rc = 1;
 		} else {
-			xstrfmtcat(name_char, " || name='%s'", object);
-			xstrfmtcat(assoc_char, " || t2.user='%s'", object);
+			xstrfmtcat(name_char, " || name=\"%s\"", object);
+			xstrfmtcat(assoc_char, " || t2.user=\"%s\"", object);
 		}
 		user_rec = xmalloc(sizeof(acct_user_rec_t));
 		user_rec->name = xstrdup(object);
@@ -3974,10 +5038,18 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		errno = SLURM_NO_CHANGE_IN_DATA;
 		debug3("didn't effect anything\n%s", query);
 		xfree(query);
+		list_destroy(assoc_cond.user_list);
 		return ret_list;
 	}
 	xfree(query);
 
+	/* We also need to remove these users from any accounts they coordinate */
+	coord_list = acct_storage_p_remove_coord(
+		mysql_conn, uid, NULL, &user_coord_cond);
+	if(coord_list)
+		list_destroy(coord_list);
+	list_destroy(assoc_cond.user_list);
+
 	user_name = uid_to_string((uid_t) uid);
 	rc = _remove_common(mysql_conn, DBD_REMOVE_USERS, now,
 			    user_name, user_table, name_char, assoc_char);
@@ -4026,10 +5098,11 @@ extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 	MYSQL_ROW row;
 	acct_user_rec_t user;
 
-	if(!user_cond) {
+	if(!user_cond && !acct_list) {
 		error("we need something to remove");
 		return NULL;
-	}
+	} else if(user_cond && user_cond->assoc_cond)
+		user_list = user_cond->assoc_cond->user_list;
 
 	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
@@ -4075,19 +5148,18 @@ extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 
 	/* Leave it this way since we are using extra below */
 
-	if(user_cond->assoc_cond && user_cond->assoc_cond->user_list
-	   && list_count(user_cond->assoc_cond->user_list)) {
+	if(user_list && list_count(user_list)) {
 		set = 0;
 		if(extra)
 			xstrcat(extra, " && (");
 		else
-			xstrcat(extra, " (");
+			xstrcat(extra, "(");
 			
-		itr = list_iterator_create(user_cond->assoc_cond->user_list);
+		itr = list_iterator_create(user_list);
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "user='%s'", object);
+			xstrfmtcat(extra, "user=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -4099,13 +5171,13 @@ extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 		if(extra)
 			xstrcat(extra, " && (");
 		else
-			xstrcat(extra, " (");
+			xstrcat(extra, "(");
 
 		itr = list_iterator_create(acct_list);
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "acct='%s'", object);
+			xstrfmtcat(extra, "acct=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -4210,6 +5282,7 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 #ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
+	List coord_list = NULL;
 	int rc = SLURM_SUCCESS;
 	char *object = NULL;
 	char *extra = NULL, *query = NULL,
@@ -4238,7 +5311,7 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -4253,7 +5326,7 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "description='%s'", object);
+			xstrfmtcat(extra, "description=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -4268,29 +5341,13 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "organization='%s'", object);
+			xstrfmtcat(extra, "organization=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
 	
-	if(acct_cond->qos_list && list_count(acct_cond->qos_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(acct_cond->qos_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, 
-				   "(qos like '%%,%s' || qos like '%%,%s,%%')",
-				   object, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
 	if(!extra) {
 		error("Nothing to remove");
 		return NULL;
@@ -4310,12 +5367,12 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 		char *object = xstrdup(row[0]);
 		list_append(ret_list, object);
 		if(!rc) {
-			xstrfmtcat(name_char, "name='%s'", object);
-			xstrfmtcat(assoc_char, "t2.acct='%s'", object);
+			xstrfmtcat(name_char, "name=\"%s\"", object);
+			xstrfmtcat(assoc_char, "t2.acct=\"%s\"", object);
 			rc = 1;
 		} else  {
-			xstrfmtcat(name_char, " || name='%s'", object);
-			xstrfmtcat(assoc_char, " || t2.acct='%s'", object);
+			xstrfmtcat(name_char, " || name=\"%s\"", object);
+			xstrfmtcat(assoc_char, " || t2.acct=\"%s\"", object);
 		}
 	}
 	mysql_free_result(result);
@@ -4328,6 +5385,12 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 	xfree(query);
 
+	/* Remove the accounts being deleted from any coordinators that have them */
+	coord_list = acct_storage_p_remove_coord(
+		mysql_conn, uid, ret_list, NULL);
+	if(coord_list)
+		list_destroy(coord_list);
+
 	user_name = uid_to_string((uid_t) uid);
 	rc = _remove_common(mysql_conn, DBD_REMOVE_ACCOUNTS, now,
 			    user_name, acct_table, name_char, assoc_char);
@@ -4379,7 +5442,7 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -4404,14 +5467,14 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 		char *object = xstrdup(row[0]);
 		list_append(ret_list, object);
 		if(!rc) {
-			xstrfmtcat(name_char, "name='%s'", object);
-			xstrfmtcat(extra, "t2.cluster='%s'", object);
-			xstrfmtcat(assoc_char, "cluster='%s'", object);
+			xstrfmtcat(name_char, "name=\"%s\"", object);
+			xstrfmtcat(extra, "t2.cluster=\"%s\"", object);
+			xstrfmtcat(assoc_char, "cluster=\"%s\"", object);
 			rc = 1;
 		} else  {
-			xstrfmtcat(name_char, " || name='%s'", object);
-			xstrfmtcat(extra, " || t2.cluster='%s'", object);
-			xstrfmtcat(assoc_char, " || cluster='%s'", object);
+			xstrfmtcat(name_char, " || name=\"%s\"", object);
+			xstrfmtcat(extra, " || t2.cluster=\"%s\"", object);
+			xstrfmtcat(assoc_char, " || cluster=\"%s\"", object);
 		}
 	}
 	mysql_free_result(result);
@@ -4427,9 +5490,11 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 	/* We should not need to delete any cluster usage just set it
 	 * to deleted */
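+	/* Also close any still-open event records for these clusters by
+	 * setting period_end to the removal time. */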
 	xstrfmtcat(query,
+		   "update %s set period_end=%d where (%s);"
 		   "update %s set mod_time=%d, deleted=1 where (%s);"
 		   "update %s set mod_time=%d, deleted=1 where (%s);"
 		   "update %s set mod_time=%d, deleted=1 where (%s);",
+		   event_table, now, assoc_char,
 		   cluster_day_table, now, assoc_char,
 		   cluster_hour_table, now, assoc_char,
 		   cluster_month_table, now, assoc_char);
@@ -4542,95 +5607,19 @@ extern List acct_storage_p_remove_associations(
 			}
 			if(!user.coord_accts || !list_count(user.coord_accts)) {
 				error("This user doesn't have any "
-				      "coordinator abilities");
-				errno = ESLURM_ACCESS_DENIED;
-				return NULL;
-			}
-		}
-	} else {
-		/* Setting this here just makes it easier down below
-		 * since user will not be filled in.
-		 */
-		is_admin = 1;
-	}
-
-	xstrcat(extra, "where id>0 && deleted=0");
-
-	if(assoc_cond->acct_list && list_count(assoc_cond->acct_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->acct_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "acct='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
-	if(assoc_cond->cluster_list && list_count(assoc_cond->cluster_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->cluster_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "cluster='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
-	if(assoc_cond->user_list && list_count(assoc_cond->user_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->user_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "user='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
-	if(assoc_cond->partition_list 
-	   && list_count(assoc_cond->partition_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->partition_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "partition='%s'", object);
-			set = 1;
+				      "coordinator abilities");
+				errno = ESLURM_ACCESS_DENIED;
+				return NULL;
+			}
 		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
+	} else {
+		/* Setting this here just makes it easier down below
+		 * since user will not be filled in.
+		 */
+		is_admin = 1;
 	}
 
-	if(assoc_cond->id_list && list_count(assoc_cond->id_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->id_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "id=%s", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-	
-	if(assoc_cond->parent_acct) {
-		xstrfmtcat(extra, " && parent_acct='%s'",
-			   assoc_cond->parent_acct);
-	}
+	set = _setup_association_cond_limits(assoc_cond, &extra);
 
 	for(i=0; i<RASSOC_COUNT; i++) {
 		if(i) 
@@ -4638,8 +5627,8 @@ extern List acct_storage_p_remove_associations(
 		xstrcat(object, rassoc_req_inx[i]);
 	}
 
-	query = xstrdup_printf("select lft, rgt from %s %s order by lft "
-			       "FOR UPDATE;",
+	query = xstrdup_printf("select distinct t1.lft, t1.rgt from %s as t1%s "
+			       "order by lft FOR UPDATE;",
 			       assoc_table, extra);
 	xfree(extra);
 	if(!(result = mysql_db_query_ret(
@@ -4690,6 +5679,7 @@ extern List acct_storage_p_remove_associations(
 		xfree(name_char);
 		return NULL;
 	}
+	xfree(query);
 
 	rc = 0;
 	ret_list = list_create(slurm_destroy_char);
@@ -4751,6 +5741,7 @@ extern List acct_storage_p_remove_associations(
 		}
 
 		rem_assoc = xmalloc(sizeof(acct_association_rec_t));
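+		/* init_acct_association_rec() presumably resets the limit
+		 * fields to their "unset" values so consumers of the update
+		 * list do not mistake zeroed fields for real limits. */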
+		init_acct_association_rec(rem_assoc);
 		rem_assoc->id = atoi(row[RASSOC_ID]);
 		if(_addto_update_list(mysql_conn->update_list, 
 				      ACCT_REMOVE_ASSOC,
@@ -4821,7 +5812,7 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "description='%s'", object);
+			xstrfmtcat(extra, "description=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -4836,7 +5827,7 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "id='%s'", object);
+			xstrfmtcat(extra, "id=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -4851,7 +5842,7 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -4863,7 +5854,7 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		return NULL;
 	}
 
-	query = xstrdup_printf("select id from %s %s;", qos_table, extra);
+	query = xstrdup_printf("select id, name from %s %s;", qos_table, extra);
 	xfree(extra);
 	if(!(result = mysql_db_query_ret(
 		     mysql_conn->db_conn, query, 0))) {
@@ -4871,25 +5862,25 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		return NULL;
 	}
 
-	rc = 0;
+	name_char = NULL;
 	ret_list = list_create(slurm_destroy_char);
 	while((row = mysql_fetch_row(result))) {
-		char *object = xstrdup(row[0]);
 		acct_qos_rec_t *qos_rec = NULL;
 
-		list_append(ret_list, object);
-		if(!rc) {
-			xstrfmtcat(name_char, "id='%s'", object);
-			xstrfmtcat(assoc_char, "qos=replace(qos, ',%s', '')",
-				   object);
-			rc = 1;
-		} else  {
-			xstrfmtcat(name_char, " || id='%s'", object); 
-			xstrfmtcat(assoc_char, ", qos=replace(qos, ',%s', '')",
-				   object);
-		}
+		list_append(ret_list, xstrdup(row[1]));
+		if(!name_char)
+			xstrfmtcat(name_char, "id=\"%s\"", row[0]);
+		else
+			xstrfmtcat(name_char, " || id=\"%s\"", row[0]); 
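+		/* Strip the removed QOS id from every association's qos
+		 * list, including '+id'/'-id' entries kept in delta_qos. */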
+		xstrfmtcat(assoc_char, 
+			   ", qos=replace(qos, ',%s', '')"
+			   ", delta_qos=replace(delta_qos, ',+%s', '')"
+			   ", delta_qos=replace(delta_qos, ',-%s', '')",
+			   row[0], row[0], row[0]);
+
 		qos_rec = xmalloc(sizeof(acct_qos_rec_t));
-		qos_rec->name = xstrdup(object);
+		/* we only need the id when removing, so no real need to init */
+		qos_rec->id = atoi(row[0]);
 		_addto_update_list(mysql_conn->update_list, ACCT_REMOVE_QOS,
 				   qos_rec);
 	}
@@ -4906,6 +5897,7 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	user_name = uid_to_string((uid_t) uid);
 	rc = _remove_common(mysql_conn, DBD_REMOVE_ACCOUNTS, now,
 			    user_name, qos_table, name_char, assoc_char);
+	xfree(assoc_char);
 	xfree(name_char);
 	xfree(user_name);
 	if (rc == SLURM_ERROR) {
@@ -4940,13 +5932,11 @@ extern List acct_storage_p_get_users(mysql_conn_t *mysql_conn, uid_t uid,
 	char *user_req_inx[] = {
 		"name",
 		"default_acct",
-		"qos",
 		"admin_level"
 	};
 	enum {
 		USER_REQ_NAME,
 		USER_REQ_DA,
-		USER_REQ_QOS,
 		USER_REQ_AL,
 		USER_REQ_COUNT
 	};
@@ -5001,7 +5991,7 @@ extern List acct_storage_p_get_users(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -5015,29 +6005,13 @@ extern List acct_storage_p_get_users(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "default_acct='%s'", object);
+			xstrfmtcat(extra, "default_acct=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
 	
-	if(user_cond->qos_list && list_count(user_cond->qos_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(user_cond->qos_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, 
-				   "(qos like '%%,%s' || qos like '%%,%s,%%')",
-				   object, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
 	if(user_cond->admin_level != ACCT_ADMIN_NOTSET) {
 		xstrfmtcat(extra, " && admin_level=%u",
 			   user_cond->admin_level);
@@ -5047,7 +6021,7 @@ empty:
 	 * if this flag is set. 
 	 */
 	if(!is_admin && (private_data & PRIVATE_DATA_USERS)) {
-		xstrfmtcat(extra, " && name='%s'", user.name);
+		xstrfmtcat(extra, " && name=\"%s\"", user.name);
 	}
 
 	xfree(tmp);
@@ -5070,6 +6044,16 @@ empty:
 
 	user_list = list_create(destroy_acct_user_rec);
 
+	if(user_cond && user_cond->with_assocs) {
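+		/* When associations are requested, collect every user name
+		 * into this list while reading the rows below, then fetch
+		 * all their associations with a single query after the main
+		 * loop rather than one query per user. */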
+		/* The entries of this list are the user->name pointers,
+		   which are freed with the user records, so do not
+		   free them here.
+		*/
+		if(user_cond->assoc_cond->user_list)
+			list_destroy(user_cond->assoc_cond->user_list);
+		user_cond->assoc_cond->user_list = list_create(NULL);
+	}
+
 	while((row = mysql_fetch_row(result))) {
 		acct_user_rec_t *user = xmalloc(sizeof(acct_user_rec_t));
 /* 		uid_t pw_uid; */
@@ -5078,12 +6062,7 @@ empty:
 		user->name =  xstrdup(row[USER_REQ_NAME]);
 		user->default_acct = xstrdup(row[USER_REQ_DA]);
 		user->admin_level = atoi(row[USER_REQ_AL]);
-		if(row[USER_REQ_QOS] && row[USER_REQ_QOS][0]) {
-			user->qos_list = list_create(slurm_destroy_char);
-			slurm_addto_char_list(user->qos_list,
-					      row[USER_REQ_QOS]);
-		}
-
+		
 		/* user id will be set on the client since this could be on a
 		 * different machine where this user may not exist or
 		 * may have a different uid
@@ -5099,25 +6078,53 @@ empty:
 		}
 
 		if(user_cond && user_cond->with_assocs) {
-			acct_association_cond_t *assoc_cond = NULL;
 			if(!user_cond->assoc_cond) {
 				user_cond->assoc_cond = xmalloc(
 					sizeof(acct_association_cond_t));
 			}
-			assoc_cond = user_cond->assoc_cond;
-			if(assoc_cond->user_list)
-				list_destroy(assoc_cond->user_list);
 
-			assoc_cond->user_list = list_create(NULL);
-			list_append(assoc_cond->user_list, user->name);
-			user->assoc_list = acct_storage_p_get_associations(
-				mysql_conn, uid, assoc_cond);
-			list_destroy(assoc_cond->user_list);
-			assoc_cond->user_list = NULL;
+			list_append(user_cond->assoc_cond->user_list,
+				    user->name);
 		}
 	}
 	mysql_free_result(result);
 
+	if(user_cond && user_cond->with_assocs 
+	   && list_count(user_cond->assoc_cond->user_list)) {
+		ListIterator assoc_itr = NULL;
+		acct_user_rec_t *user = NULL;
+		acct_association_rec_t *assoc = NULL;
+		List assoc_list = acct_storage_p_get_associations(
+			mysql_conn, uid, user_cond->assoc_cond);
+
+		if(!assoc_list) {
+			error("no associations");
+			return user_list;
+		}
+
+		itr = list_iterator_create(user_list);
+		assoc_itr = list_iterator_create(assoc_list);
+		while((user = list_next(itr))) {
+			while((assoc = list_next(assoc_itr))) {
+				if(strcmp(assoc->user, user->name)) 
+					continue;
+				
+				if(!user->assoc_list)
+					user->assoc_list = list_create(
+						destroy_acct_association_rec);
+				list_append(user->assoc_list, assoc);
+				list_remove(assoc_itr);
+			}
+			list_iterator_reset(assoc_itr);
+			if(!user->assoc_list)
+				list_remove(itr);
+		}
+		list_iterator_destroy(itr);
+		list_iterator_destroy(assoc_itr);
+
+		list_destroy(assoc_list);
+	}
+
 	return user_list;
 #else
 	return NULL;
@@ -5145,13 +6152,11 @@ extern List acct_storage_p_get_accts(mysql_conn_t *mysql_conn, uid_t uid,
 	char *acct_req_inx[] = {
 		"name",
 		"description",
-		"qos",
 		"organization"
 	};
 	enum {
 		ACCT_REQ_NAME,
 		ACCT_REQ_DESC,
-		ACCT_REQ_QOS,
 		ACCT_REQ_ORG,
 		ACCT_REQ_COUNT
 	};
@@ -5212,7 +6217,7 @@ extern List acct_storage_p_get_accts(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -5227,7 +6232,7 @@ extern List acct_storage_p_get_accts(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "description='%s'", object);
+			xstrfmtcat(extra, "description=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -5242,29 +6247,13 @@ extern List acct_storage_p_get_accts(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "organization='%s'", object);
+			xstrfmtcat(extra, "organization=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
 		xstrcat(extra, ")");
 	}
 	
-	if(acct_cond->qos_list && list_count(acct_cond->qos_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(acct_cond->qos_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, 
-				   "(qos like '%%,%s' || qos like '%%,%s,%%')",
-				   object, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
 empty:
 
 	xfree(tmp);
@@ -5283,10 +6272,12 @@ empty:
 		itr = list_iterator_create(user.coord_accts);
 		while((coord = list_next(itr))) {
 			if(set) {
-				xstrfmtcat(extra, " || name='%s'", coord->name);
+				xstrfmtcat(extra, " || name=\"%s\"",
+					   coord->name);
 			} else {
 				set = 1;
-				xstrfmtcat(extra, " && (name='%s'",coord->name);
+				xstrfmtcat(extra, " && (name=\"%s\"",
+					   coord->name);
 			}
 		}		
 		list_iterator_destroy(itr);
@@ -5307,6 +6298,16 @@ empty:
 	xfree(query);
 
 	acct_list = list_create(destroy_acct_account_rec);
+	
+	if(acct_cond && acct_cond->with_assocs) {
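+		/* Same batching as in acct_storage_p_get_users(): gather the
+		 * account names while reading the rows below and resolve
+		 * their associations with one query after the main loop. */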
+		/* The entries of this list are the acct->name pointers,
+		   which are freed with the account records, so do not
+		   free them here.
+		*/
+		if(acct_cond->assoc_cond->acct_list) 
+			list_destroy(acct_cond->assoc_cond->acct_list);
+		acct_cond->assoc_cond->acct_list = list_create(NULL);
+	}
 
 	while((row = mysql_fetch_row(result))) {
 		acct_account_rec_t *acct = xmalloc(sizeof(acct_account_rec_t));
@@ -5315,37 +6316,59 @@ empty:
 		acct->name =  xstrdup(row[ACCT_REQ_NAME]);
 		acct->description = xstrdup(row[ACCT_REQ_DESC]);
 		acct->organization = xstrdup(row[ACCT_REQ_ORG]);
-		if(row[ACCT_REQ_QOS] && row[ACCT_REQ_QOS][0]) {
-			acct->qos_list = list_create(slurm_destroy_char);
-			slurm_addto_char_list(acct->qos_list,
-					      row[ACCT_REQ_QOS]);
-		}
 
 		if(acct_cond && acct_cond->with_coords) {
 			_get_account_coords(mysql_conn, acct);
 		}
 
 		if(acct_cond && acct_cond->with_assocs) {
-			acct_association_cond_t *assoc_cond = NULL;
 			if(!acct_cond->assoc_cond) {
 				acct_cond->assoc_cond = xmalloc(
 					sizeof(acct_association_cond_t));
 			}
-			assoc_cond = acct_cond->assoc_cond;
-			if(assoc_cond->acct_list)
-				list_destroy(assoc_cond->acct_list);
 
-			assoc_cond->acct_list = list_create(NULL);
-			list_append(assoc_cond->acct_list, acct->name);
-			acct->assoc_list = acct_storage_p_get_associations(
-				mysql_conn, uid, assoc_cond);
-			list_destroy(assoc_cond->acct_list);
-			assoc_cond->acct_list = NULL;
+			list_append(acct_cond->assoc_cond->acct_list,
+				    acct->name);
 		}
-
 	}
 	mysql_free_result(result);
 
+	if(acct_cond && acct_cond->with_assocs
+	   && list_count(acct_cond->assoc_cond->acct_list)) {
+		ListIterator assoc_itr = NULL;
+		acct_account_rec_t *acct = NULL;
+		acct_association_rec_t *assoc = NULL;
+		List assoc_list = acct_storage_p_get_associations(
+			mysql_conn, uid, acct_cond->assoc_cond);
+
+		if(!assoc_list) {
+			error("no associations");
+			return acct_list;
+		}
+
+		itr = list_iterator_create(acct_list);
+		assoc_itr = list_iterator_create(assoc_list);
+		while((acct = list_next(itr))) {
+			while((assoc = list_next(assoc_itr))) {
+				if(strcmp(assoc->acct, acct->name)) 
+					continue;
+				
+				if(!acct->assoc_list)
+					acct->assoc_list = list_create(
+						destroy_acct_association_rec);
+				list_append(acct->assoc_list, assoc);
+				list_remove(assoc_itr);
+			}
+			list_iterator_reset(assoc_itr);
+			if(!acct->assoc_list)
+				list_remove(itr);
+		}
+		list_iterator_destroy(itr);
+		list_iterator_destroy(assoc_itr);
+
+		list_destroy(assoc_list);
+	}
+
 	return acct_list;
 #else
 	return NULL;
@@ -5366,34 +6389,28 @@ extern List acct_storage_p_get_clusters(mysql_conn_t *mysql_conn, uid_t uid,
 	int i=0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
+	acct_association_cond_t assoc_cond;
+	ListIterator assoc_itr = NULL;
+	acct_cluster_rec_t *cluster = NULL;
+	acct_association_rec_t *assoc = NULL;
+	List assoc_list = NULL;
 
 	/* if this changes you will need to edit the corresponding enum */
 	char *cluster_req_inx[] = {
 		"name",
 		"control_host",
-		"control_port"
+		"control_port",
+		"rpc_version",
+		"valid_qos",
 	};
 	enum {
 		CLUSTER_REQ_NAME,
 		CLUSTER_REQ_CH,
 		CLUSTER_REQ_CP,
+		CLUSTER_REQ_VERSION,
+		CLUSTER_REQ_VALID_QOS,
 		CLUSTER_REQ_COUNT
 	};
-	char *assoc_req_inx[] = {
-		"fairshare",
-		"max_jobs",
-		"max_nodes_per_job",
-		"max_wall_duration_per_job",
-		"max_cpu_secs_per_job",
-	};
-	enum {
-		ASSOC_REQ_FS,
-		ASSOC_REQ_MJ,
-		ASSOC_REQ_MNPJ,
-		ASSOC_REQ_MWPJ,
-		ASSOC_REQ_MCPJ,
-		ASSOC_REQ_COUNT
-	};
 
 	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
@@ -5409,14 +6426,15 @@ extern List acct_storage_p_get_clusters(mysql_conn_t *mysql_conn, uid_t uid,
 	else
 		xstrcat(extra, "where deleted=0");
 
-	if(cluster_cond->cluster_list && list_count(cluster_cond->cluster_list)) {
+	if(cluster_cond->cluster_list 
+	   && list_count(cluster_cond->cluster_list)) {
 		set = 0;
 		xstrcat(extra, " && (");
 		itr = list_iterator_create(cluster_cond->cluster_list);
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -5445,25 +6463,22 @@ empty:
 	}
 	xfree(query);
 
-	i=0;
-	xstrfmtcat(tmp, "%s", assoc_req_inx[i]);
-	for(i=1; i<ASSOC_REQ_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", assoc_req_inx[i]);
-	}
-
 	cluster_list = list_create(destroy_acct_cluster_rec);
 
+	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
+
+	assoc_cond.cluster_list = list_create(NULL);
+
 	while((row = mysql_fetch_row(result))) {
-		acct_cluster_rec_t *cluster =
-			xmalloc(sizeof(acct_cluster_rec_t));
-		MYSQL_RES *result2 = NULL;
-		MYSQL_ROW row2;
+		cluster = xmalloc(sizeof(acct_cluster_rec_t));
 		list_append(cluster_list, cluster);
 
-		cluster->name =  xstrdup(row[CLUSTER_REQ_NAME]);
+		cluster->name = xstrdup(row[CLUSTER_REQ_NAME]);
+
+		list_append(assoc_cond.cluster_list, cluster->name);
 
 		/* get the usage if requested */
-		if(cluster_cond->with_usage) {
+		if(cluster_cond && cluster_cond->with_usage) {
 			clusteracct_storage_p_get_usage(
 				mysql_conn, uid, cluster,
 				cluster_cond->usage_start,
@@ -5472,48 +6487,60 @@ empty:
 
 		cluster->control_host = xstrdup(row[CLUSTER_REQ_CH]);
 		cluster->control_port = atoi(row[CLUSTER_REQ_CP]);
-		query = xstrdup_printf("select %s from %s where cluster='%s' "
-				       "&& acct='root'", 
-				       tmp, assoc_table, cluster->name);
-		if(!(result2 = mysql_db_query_ret(mysql_conn->db_conn,
-						  query, 1))) {
-			xfree(query);
-			break;
-		}
-		xfree(query);
-		row2 = mysql_fetch_row(result2);
-
-		if(row2 && row2[ASSOC_REQ_FS])
-			cluster->default_fairshare = atoi(row2[ASSOC_REQ_FS]);
-		else
-			cluster->default_fairshare = 1;
+		cluster->rpc_version = atoi(row[CLUSTER_REQ_VERSION]);
+		cluster->valid_qos_list = list_create(slurm_destroy_char);
 
-		if(row2 && row2[ASSOC_REQ_MJ])
-			cluster->default_max_jobs = atoi(row2[ASSOC_REQ_MJ]);
-		else
-			cluster->default_max_jobs = INFINITE;
-		
-		if(row2 && row2[ASSOC_REQ_MNPJ])
-			cluster->default_max_nodes_per_job =
-				atoi(row2[ASSOC_REQ_MNPJ]);
-		else
-			cluster->default_max_nodes_per_job = INFINITE;
-		
-		if(row2 && row2[ASSOC_REQ_MWPJ])
-			cluster->default_max_wall_duration_per_job = 
-				atoi(row2[ASSOC_REQ_MWPJ]);
-		else
-			cluster->default_max_wall_duration_per_job = INFINITE;
-		
-		if(row2 && row2[ASSOC_REQ_MCPJ])
-			cluster->default_max_cpu_secs_per_job = 
-				atoi(row2[ASSOC_REQ_MCPJ]);
+		if(row[CLUSTER_REQ_VALID_QOS] && row[CLUSTER_REQ_VALID_QOS][0])
+			slurm_addto_char_list(cluster->valid_qos_list,
+					      row[CLUSTER_REQ_VALID_QOS]+1);
 		else 
-			cluster->default_max_cpu_secs_per_job = INFINITE;
-		mysql_free_result(result2);
+			list_append(cluster->valid_qos_list, xstrdup("all"));
 	}
 	mysql_free_result(result);
-	xfree(tmp);
+
+	if(!list_count(assoc_cond.cluster_list)) {
+		list_destroy(assoc_cond.cluster_list);
+		return cluster_list;
+	}
+	
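+	/* Look up the root association of every cluster in one query:
+	 * acct "root" with an empty user name selects the cluster-level
+	 * association, which is attached as cluster->root_assoc below. */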
+	assoc_cond.acct_list = list_create(NULL);
+	list_append(assoc_cond.acct_list, "root");
+
+	assoc_cond.user_list = list_create(NULL);
+	list_append(assoc_cond.user_list, "");
+
+	assoc_list = acct_storage_p_get_associations(mysql_conn,
+						     uid, &assoc_cond);
+	list_destroy(assoc_cond.cluster_list);
+	list_destroy(assoc_cond.acct_list);
+	list_destroy(assoc_cond.user_list);
+
+	if(!assoc_list) 
+		return cluster_list;
+	
+	itr = list_iterator_create(cluster_list);
+	assoc_itr = list_iterator_create(assoc_list);
+	while((cluster = list_next(itr))) {
+		while((assoc = list_next(assoc_itr))) {
+			if(strcmp(assoc->cluster, cluster->name)) 
+				continue;
+			
+			if(cluster->root_assoc) {
+				debug("This cluster %s already has "
+				      "an association.", cluster->name);
+				continue;
+			}
+			cluster->root_assoc = assoc;
+			list_remove(assoc_itr);
+		}
+		list_iterator_reset(assoc_itr);
+	}
+	list_iterator_destroy(itr);
+	list_iterator_destroy(assoc_itr);
+	if(list_count(assoc_list))
+		info("I have %d left over associations", 
+		     list_count(assoc_list));
+	list_destroy(assoc_list);
 
 	return cluster_list;
 #else
@@ -5530,16 +6557,20 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 	char *extra = NULL;	
 	char *tmp = NULL;	
 	List assoc_list = NULL;
+	List delta_qos_list = NULL;
 	ListIterator itr = NULL;
-	char *object = NULL;
 	int set = 0;
 	int i=0, is_admin=1;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	int parent_mj = INFINITE;
-	int parent_mnpj = INFINITE;
-	int parent_mwpj = INFINITE;
-	int parent_mcpj = INFINITE;
+	uint32_t parent_mj = INFINITE;
+	uint32_t parent_msj = INFINITE;
+	uint32_t parent_mcpj = INFINITE;
+	uint32_t parent_mnpj = INFINITE;
+	uint32_t parent_mwpj = INFINITE;
+	uint64_t parent_mcmpj = INFINITE;
+	char *parent_qos = NULL;
+	char *parent_delta_qos = NULL;
 	char *last_acct = NULL;
 	char *last_acct_parent = NULL;
 	char *last_cluster = NULL;
@@ -5553,6 +6584,7 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 	uint16_t without_parent_info = 0;
 	uint16_t without_parent_limits = 0;
 	uint16_t with_usage = 0;
+	uint16_t with_raw_qos = 0;
 
 	/* if this changes you will need to edit the corresponding enum */
 	char *assoc_req_inx[] = {
@@ -5563,12 +6595,22 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 		"acct",
 		"cluster",
 		"partition",
-		"parent_acct",
 		"fairshare",
+		"grp_cpu_mins",
+		"grp_cpus",
+		"grp_jobs",
+		"grp_nodes",
+		"grp_submit_jobs",
+		"grp_wall",
+		"max_cpu_mins_per_job",
+		"max_cpus_per_job",
 		"max_jobs",
 		"max_nodes_per_job",
+		"max_submit_jobs",
 		"max_wall_duration_per_job",
-		"max_cpu_secs_per_job",
+		"parent_acct",
+		"qos",
+		"delta_qos",
 	};
 	enum {
 		ASSOC_REQ_ID,
@@ -5578,24 +6620,39 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 		ASSOC_REQ_ACCT,
 		ASSOC_REQ_CLUSTER,
 		ASSOC_REQ_PART,
-		ASSOC_REQ_PARENT,
 		ASSOC_REQ_FS,
+		ASSOC_REQ_GCH,
+		ASSOC_REQ_GC,
+		ASSOC_REQ_GJ,
+		ASSOC_REQ_GN,
+		ASSOC_REQ_GSJ,
+		ASSOC_REQ_GW,
+		ASSOC_REQ_MCMPJ,
+		ASSOC_REQ_MCPJ,
 		ASSOC_REQ_MJ,
 		ASSOC_REQ_MNPJ,
+		ASSOC_REQ_MSJ,
 		ASSOC_REQ_MWPJ,
-		ASSOC_REQ_MCPJ,
+		ASSOC_REQ_PARENT,
+		ASSOC_REQ_QOS,
+		ASSOC_REQ_DELTA_QOS,
 		ASSOC_REQ_COUNT
 	};
+
 	enum {
 		ASSOC2_REQ_PARENT_ID,
 		ASSOC2_REQ_MJ,
+		ASSOC2_REQ_MSJ,
+		ASSOC2_REQ_MCPJ,
 		ASSOC2_REQ_MNPJ,
 		ASSOC2_REQ_MWPJ,
-		ASSOC2_REQ_MCPJ
+		ASSOC2_REQ_MCMPJ,
+		ASSOC2_REQ_QOS,
+		ASSOC2_REQ_DELTA_QOS,
 	};
 
 	if(!assoc_cond) {
-		xstrcat(extra, "where deleted=0");
+		xstrcat(extra, " where deleted=0");
 		goto empty;
 	}
 
@@ -5629,90 +6686,17 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 		}
 	}
 
-	if(assoc_cond->with_deleted) 
-		xstrcat(extra, "where (deleted=0 || deleted=1)");
-	else
-		xstrcat(extra, "where deleted=0");
-
-	if(assoc_cond->acct_list && list_count(assoc_cond->acct_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->acct_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "acct='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
-	if(assoc_cond->cluster_list && list_count(assoc_cond->cluster_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->cluster_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "cluster='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
-	if(assoc_cond->user_list && list_count(assoc_cond->user_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->user_list);
-		while((object = list_next(itr))) {
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "user='%s'", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-
-	if(assoc_cond->id_list && list_count(assoc_cond->id_list)) {
-		set = 0;
-		xstrcat(extra, " && (");
-		itr = list_iterator_create(assoc_cond->id_list);
-		while((object = list_next(itr))) {
-			char *ptr = NULL;
-			long num = strtol(object, &ptr, 10);
-			if ((num == 0) && ptr && ptr[0]) {
-				error("Invalid value for assoc id (%s)",
-				      object);
-				xfree(extra);
-				list_iterator_destroy(itr);
-				return NULL;
-			}
-
-			if(set) 
-				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "id=%s", object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(extra, ")");
-	}
-	
-	if(assoc_cond->parent_acct) {
-		xstrfmtcat(extra, " && parent_acct='%s'",
-			   assoc_cond->parent_acct);
-	}
+	set = _setup_association_cond_limits(assoc_cond, &extra);
 
+	with_raw_qos = assoc_cond->with_raw_qos;
 	with_usage = assoc_cond->with_usage;
 	without_parent_limits = assoc_cond->without_parent_limits;
 	without_parent_info = assoc_cond->without_parent_info;
 empty:
 	xfree(tmp);
-	xstrfmtcat(tmp, "%s", assoc_req_inx[i]);
+	xstrfmtcat(tmp, "t1.%s", assoc_req_inx[i]);
 	for(i=1; i<ASSOC_REQ_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", assoc_req_inx[i]);
+		xstrfmtcat(tmp, ", t1.%s", assoc_req_inx[i]);
 	}
 	
 	/* this is here to make sure we are looking at only this user
@@ -5720,13 +6704,13 @@ empty:
 	 * coordinator of.
 	 */
 	if(!is_admin && (private_data & PRIVATE_DATA_USERS)) {
-		query = xstrdup_printf("select lft from %s where user='%s'", 
+		query = xstrdup_printf("select lft from %s where user=\"%s\"", 
 				       assoc_table, user.name);
 		if(user.coord_accts) {
 			acct_coord_rec_t *coord = NULL;
 			itr = list_iterator_create(user.coord_accts);
 			while((coord = list_next(itr))) {
-				xstrfmtcat(query, " || acct='%s'",
+				xstrfmtcat(query, " || acct=\"%s\"",
 					   coord->name);
 			}
 			list_iterator_destroy(itr);
@@ -5757,7 +6741,8 @@ empty:
 		mysql_free_result(result);
 	}
 	
-	query = xstrdup_printf("select %s from %s %s order by lft;", 
+	query = xstrdup_printf("select distinct %s from %s as t1%s "
+			       "order by lft;", 
 			       tmp, assoc_table, extra);
 	xfree(tmp);
 	xfree(extra);
@@ -5770,7 +6755,7 @@ empty:
 	xfree(query);
 
 	assoc_list = list_create(destroy_acct_association_rec);
-
+	delta_qos_list = list_create(slurm_destroy_char);
 	while((row = mysql_fetch_row(result))) {
 		acct_association_rec_t *assoc =
 			xmalloc(sizeof(acct_association_rec_t));
@@ -5783,11 +6768,40 @@ empty:
 		assoc->lft = atoi(row[ASSOC_REQ_LFT]);
 		assoc->rgt = atoi(row[ASSOC_REQ_RGT]);
 
-		if(row[ASSOC_REQ_USER][0])
-			assoc->user = xstrdup(row[ASSOC_REQ_USER]);
-		assoc->acct = xstrdup(row[ASSOC_REQ_ACCT]);
-		assoc->cluster = xstrdup(row[ASSOC_REQ_CLUSTER]);
-			
+		if(row[ASSOC_REQ_USER][0])
+			assoc->user = xstrdup(row[ASSOC_REQ_USER]);
+		assoc->acct = xstrdup(row[ASSOC_REQ_ACCT]);
+		assoc->cluster = xstrdup(row[ASSOC_REQ_CLUSTER]);
+
+		if(row[ASSOC_REQ_GJ])
+			assoc->grp_jobs = atoi(row[ASSOC_REQ_GJ]);
+		else
+			assoc->grp_jobs = INFINITE;
+
+		if(row[ASSOC_REQ_GSJ])
+			assoc->grp_submit_jobs = atoi(row[ASSOC_REQ_GSJ]);
+		else
+			assoc->grp_submit_jobs = INFINITE;
+
+		if(row[ASSOC_REQ_GC])
+			assoc->grp_cpus = atoi(row[ASSOC_REQ_GC]);
+		else
+			assoc->grp_cpus = INFINITE;
+
+		if(row[ASSOC_REQ_GN])
+			assoc->grp_nodes = atoi(row[ASSOC_REQ_GN]);
+		else
+			assoc->grp_nodes = INFINITE;
+		if(row[ASSOC_REQ_GW])
+			assoc->grp_wall = atoi(row[ASSOC_REQ_GW]);
+		else
+			assoc->grp_wall = INFINITE;
+
+		if(row[ASSOC_REQ_GCH])
+			assoc->grp_cpu_mins = atoll(row[ASSOC_REQ_GCH]);
+		else
+			assoc->grp_cpu_mins = INFINITE;
+
 		/* get the usage if requested */
 		if(with_usage) {
 			acct_storage_p_get_usage(mysql_conn, uid, assoc,
@@ -5805,8 +6819,8 @@ empty:
 			   || strcmp(row[ASSOC_REQ_CLUSTER], last_cluster)) {
 				query = xstrdup_printf(
 					"select id from %s where user='' "
-					"and deleted = 0 and acct='%s' "
-					"and cluster='%s';", 
+					"and deleted = 0 and acct=\"%s\" "
+					"and cluster=\"%s\";", 
 					assoc_table, row[ASSOC_REQ_PARENT],
 					row[ASSOC_REQ_CLUSTER]);
 				debug4("%d(%d) query\n%s",
@@ -5819,10 +6833,12 @@ empty:
 					break;
 				}
 				xfree(query);
-				row2 = mysql_fetch_row(result2);
-				last_acct_parent = row[ASSOC_REQ_PARENT];
-				last_cluster = row[ASSOC_REQ_CLUSTER];
-				acct_parent_id = atoi(row2[0]);	
+				if((row2 = mysql_fetch_row(result2))) {
+					last_acct_parent = 
+						row[ASSOC_REQ_PARENT];
+					last_cluster = row[ASSOC_REQ_CLUSTER];
+					acct_parent_id = atoi(row2[0]);	
+				}
 				mysql_free_result(result2);
 			}
 			assoc->parent_acct = xstrdup(row[ASSOC_REQ_PARENT]);
@@ -5840,8 +6856,10 @@ empty:
 		    || strcmp(row[ASSOC_REQ_ACCT], last_acct)
 		    || strcmp(row[ASSOC_REQ_CLUSTER], last_cluster2))) {
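+			/* get_parent_limits() is a stored procedure that,
+			 * judging by the values read back below, leaves the
+			 * inherited parent limits and QOS in session
+			 * variables for the following select to pick up. */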
 			query = xstrdup_printf(
-				"call get_parent_limits('%s', '%s', '%s', %u);"
-				"select @par_id, @mj, @mnpj, @mwpj, @mcpj;", 
+				"call get_parent_limits(\"%s\", "
+				"\"%s\", \"%s\", %u);"
+				"select @par_id, @mj, @msj, @mcpj, "
+				"@mnpj, @mwpj, @mcmpj, @qos, @delta_qos;", 
 				assoc_table, row[ASSOC_REQ_ACCT],
 				row[ASSOC_REQ_CLUSTER],
 				without_parent_limits);
@@ -5854,9 +6872,25 @@ empty:
 			}
 			xfree(query);
 			
-			row2 = mysql_fetch_row(result2);
+			if(!(row2 = mysql_fetch_row(result2))) {
+				user_parent_id = 0;
+				goto no_parent_limits;
+			}
+
 			user_parent_id = atoi(row2[ASSOC2_REQ_PARENT_ID]);
 			if(!without_parent_limits) {
+				if(row2[ASSOC2_REQ_MCMPJ])
+					parent_mcmpj =
+						atoi(row2[ASSOC2_REQ_MCMPJ]);
+				else
+					parent_mcmpj = INFINITE;
+				
+				if(row2[ASSOC2_REQ_MCPJ])
+					parent_mcpj =
+						atoi(row2[ASSOC2_REQ_MCPJ]);
+				else
+					parent_mcpj = INFINITE;
+				
 				if(row2[ASSOC2_REQ_MJ])
 					parent_mj = atoi(row2[ASSOC2_REQ_MJ]);
 				else
@@ -5874,35 +6908,140 @@ empty:
 				else
 					parent_mwpj = INFINITE;
 				
-				if(row2[ASSOC2_REQ_MCPJ])
-					parent_mcpj =
-						atoi(row2[ASSOC2_REQ_MCPJ]);
+				if(row2[ASSOC2_REQ_MCMPJ])
+					parent_mcmpj =
+						atoll(row2[ASSOC2_REQ_MCMPJ]);
 				else 
-					parent_mcpj = INFINITE;
+					parent_mcmpj = INFINITE;
+
+				xfree(parent_qos);
+				if(row2[ASSOC2_REQ_QOS][0])
+					parent_qos =
+						xstrdup(row2[ASSOC2_REQ_QOS]);
+				else 
+					parent_qos = NULL;
+
+				xfree(parent_delta_qos);
+				if(row2[ASSOC2_REQ_DELTA_QOS][0])
+					xstrcat(parent_delta_qos, 
+						row2[ASSOC2_REQ_DELTA_QOS]);
+				else
+					parent_delta_qos = NULL;
+
+				if(row2[ASSOC2_REQ_MSJ])
+					parent_msj = atoi(row2[ASSOC2_REQ_MSJ]);
+				else
+					parent_msj = INFINITE;
 			}
 			last_acct = row[ASSOC_REQ_ACCT];
 			last_cluster2 = row[ASSOC_REQ_CLUSTER];
+		no_parent_limits:
 			mysql_free_result(result2);
 		}
 		if(row[ASSOC_REQ_MJ])
 			assoc->max_jobs = atoi(row[ASSOC_REQ_MJ]);
 		else
 			assoc->max_jobs = parent_mj;
+
+		if(row[ASSOC_REQ_MSJ])
+			assoc->max_submit_jobs = atoi(row[ASSOC_REQ_MSJ]);
+		else
+			assoc->max_submit_jobs = parent_msj;
+
+		if(row[ASSOC_REQ_MCPJ])
+			assoc->max_cpus_pj = 
+				atoi(row[ASSOC_REQ_MCPJ]);
+		else
+			assoc->max_cpus_pj = parent_mcpj;
+
 		if(row[ASSOC_REQ_MNPJ])
-			assoc->max_nodes_per_job = 
+			assoc->max_nodes_pj = 
 				atoi(row[ASSOC_REQ_MNPJ]);
 		else
-			assoc->max_nodes_per_job = parent_mnpj;
+			assoc->max_nodes_pj = parent_mnpj;
+
 		if(row[ASSOC_REQ_MWPJ])
-			assoc->max_wall_duration_per_job = 
+			assoc->max_wall_pj = 
 				atoi(row[ASSOC_REQ_MWPJ]);
 		else
-			assoc->max_wall_duration_per_job = parent_mwpj;
-		if(row[ASSOC_REQ_MCPJ])
-			assoc->max_cpu_secs_per_job = 
-				atoi(row[ASSOC_REQ_MCPJ]);
+			assoc->max_wall_pj = parent_mwpj;
+
+		if(row[ASSOC_REQ_MCMPJ])
+			assoc->max_cpu_mins_pj = 
+				atoll(row[ASSOC_REQ_MCMPJ]);
 		else
-			assoc->max_cpu_secs_per_job = parent_mcpj;
+			assoc->max_cpu_mins_pj = parent_mcmpj;
+
+		assoc->qos_list = list_create(slurm_destroy_char);
+
+		/* do a plus 1 since a comma is the first thing there
+		 * in the list.  Also you can never have both a qos
+		 * and a delta qos so if you have a qos don't worry
+		 * about the delta.
+		 */
+		if(row[ASSOC_REQ_QOS][0]) 
+			slurm_addto_char_list(assoc->qos_list,
+					      row[ASSOC_REQ_QOS]+1);
+		else {
+			if(parent_qos) 
+				slurm_addto_char_list(assoc->qos_list,
+						      parent_qos+1);
+			/* if qos is set on the association itself do
+			   not worry about the deltas
+			*/
+			if(row[ASSOC_REQ_DELTA_QOS][0]) 
+				slurm_addto_char_list(
+					delta_qos_list,
+					row[ASSOC_REQ_DELTA_QOS]+1);
+			if(parent_delta_qos)
+				slurm_addto_char_list(delta_qos_list,
+						      parent_delta_qos+1);
+		}
+
+		/* Sometimes we want to see exactly what is here in
+		   the database instead of a complete list.  This will
+		   give it to us.
+		*/
+		if(with_raw_qos && list_count(delta_qos_list)) {
+			list_transfer(assoc->qos_list, delta_qos_list);
+		} else if(list_count(delta_qos_list)) {
+			ListIterator curr_qos_itr = 
+				list_iterator_create(assoc->qos_list);
+			ListIterator new_qos_itr = 
+				list_iterator_create(delta_qos_list);
+			char *new_qos = NULL, *curr_qos = NULL;
+			
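+			/* Delta entries are prefixed with '+' or '-':
+			 * "-name" removes that QOS from the inherited
+			 * list, "+name" appends it if not already there. */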
+			while((new_qos = list_next(new_qos_itr))) {
+				if(new_qos[0] == '-') {
+					while((curr_qos =
+					       list_next(curr_qos_itr))) {
+						if(!strcmp(curr_qos,
+							   new_qos+1)) {
+							list_delete_item(
+								curr_qos_itr);
+							break;
+						}
+					}
+					list_iterator_reset(curr_qos_itr);
+				} else if(new_qos[0] == '+') {
+					while((curr_qos =
+					       list_next(curr_qos_itr))) {
+						if(!strcmp(curr_qos,
+							   new_qos+1)) {
+							break;
+						}
+					}
+					list_iterator_reset(curr_qos_itr);
+					if(!curr_qos)
+						list_append(assoc->qos_list,
+							    xstrdup(new_qos+1));
+				}
+			}
+			
+			list_iterator_destroy(new_qos_itr);
+			list_iterator_destroy(curr_qos_itr);
+			list_flush(delta_qos_list);
+		}
 
 		/* don't do this unless this is an user association */
 		if(assoc->user && assoc->parent_id != acct_parent_id) 
@@ -5913,6 +7052,11 @@ empty:
 	}
 	mysql_free_result(result);
 
+	list_destroy(delta_qos_list);
+
+	xfree(parent_delta_qos);
+	xfree(parent_qos);
+
 	return assoc_list;
 #else
 	return NULL;
@@ -5938,12 +7082,44 @@ extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 	char *qos_req_inx[] = {
 		"name",
 		"description",
-		"id"
+		"id",
+		"grp_cpu_mins",
+		"grp_cpus",
+		"grp_jobs",
+		"grp_nodes",
+		"grp_submit_jobs",
+		"grp_wall",
+		"max_cpu_mins_per_user",
+		"max_cpus_per_user",
+		"max_jobs_per_user",
+		"max_nodes_per_user",
+		"max_submit_jobs_per_user",
+		"max_wall_duration_per_user",
+		"job_flags",
+		"preemptees",
+		"preemptors",
+		"priority",
 	};
 	enum {
 		QOS_REQ_NAME,
 		QOS_REQ_DESC,
 		QOS_REQ_ID,
+		QOS_REQ_GCH,
+		QOS_REQ_GC,
+		QOS_REQ_GJ,
+		QOS_REQ_GN,
+		QOS_REQ_GSJ,
+		QOS_REQ_GW,
+		QOS_REQ_MCMPU,
+		QOS_REQ_MCPU,
+		QOS_REQ_MJPU,
+		QOS_REQ_MNPU,
+		QOS_REQ_MSJPU,
+		QOS_REQ_MWPU,
+		QOS_REQ_JOBF,
+		QOS_REQ_PREE,
+		QOS_REQ_PREO,
+		QOS_REQ_PRIO,
 		QOS_REQ_COUNT
 	};
 
@@ -5971,7 +7147,7 @@ extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "description='%s'", object);
+			xstrfmtcat(extra, "description=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -5986,7 +7162,7 @@ extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "id='%s'", object);
+			xstrfmtcat(extra, "id=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -6001,7 +7177,7 @@ extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "name='%s'", object);
+			xstrfmtcat(extra, "name=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -6034,9 +7210,81 @@ empty:
 		acct_qos_rec_t *qos = xmalloc(sizeof(acct_qos_rec_t));
 		list_append(qos_list, qos);
 
-		qos->description = xstrdup(row[QOS_REQ_DESC]);
+		if(row[QOS_REQ_DESC] && row[QOS_REQ_DESC][0])
+			qos->description = xstrdup(row[QOS_REQ_DESC]);
+
 		qos->id = atoi(row[QOS_REQ_ID]);
-		qos->name =  xstrdup(row[QOS_REQ_NAME]);
+
+		if(row[QOS_REQ_NAME] && row[QOS_REQ_NAME][0])
+			qos->name =  xstrdup(row[QOS_REQ_NAME]);
+
+		if(row[QOS_REQ_JOBF] && row[QOS_REQ_JOBF][0])
+			qos->job_flags = xstrdup(row[QOS_REQ_JOBF]);
+
+		if(row[QOS_REQ_GCH])
+			qos->grp_cpu_mins = atoll(row[QOS_REQ_GCH]);
+		else
+			qos->grp_cpu_mins = INFINITE;
+		if(row[QOS_REQ_GC])
+			qos->grp_cpus = atoi(row[QOS_REQ_GC]);
+		else
+			qos->grp_cpus = INFINITE;
+		if(row[QOS_REQ_GJ])
+			qos->grp_jobs = atoi(row[QOS_REQ_GJ]);
+		else
+			qos->grp_jobs = INFINITE;
+		if(row[QOS_REQ_GN])
+			qos->grp_nodes = atoi(row[QOS_REQ_GN]);
+		else
+			qos->grp_nodes = INFINITE;
+		if(row[QOS_REQ_GSJ])
+			qos->grp_submit_jobs = atoi(row[QOS_REQ_GSJ]);
+		else
+			qos->grp_submit_jobs = INFINITE;
+		if(row[QOS_REQ_GW])
+			qos->grp_wall = atoi(row[QOS_REQ_GW]);
+		else
+			qos->grp_wall = INFINITE;
+
+		if(row[QOS_REQ_MCMPU])
+			qos->max_cpu_mins_pu = atoll(row[QOS_REQ_MCMPU]);
+		else
+			qos->max_cpu_mins_pu = INFINITE;
+		if(row[QOS_REQ_MCPU])
+			qos->max_cpus_pu = atoi(row[QOS_REQ_MCPU]);
+		else
+			qos->max_cpus_pu = INFINITE;
+		if(row[QOS_REQ_MJPU])
+			qos->max_jobs_pu = atoi(row[QOS_REQ_MJPU]);
+		else
+			qos->max_jobs_pu = INFINITE;
+		if(row[QOS_REQ_MNPU])
+			qos->max_nodes_pu = atoi(row[QOS_REQ_MNPU]);
+		else
+			qos->max_nodes_pu = INFINITE;
+		if(row[QOS_REQ_MSJPU])
+			qos->max_submit_jobs_pu = atoi(row[QOS_REQ_MSJPU]);
+		else
+			qos->max_submit_jobs_pu = INFINITE;
+		if(row[QOS_REQ_MWPU])
+			qos->max_wall_pu = atoi(row[QOS_REQ_MWPU]);
+		else
+			qos->max_wall_pu = INFINITE;
+
+		if(row[QOS_REQ_PREE] && row[QOS_REQ_PREE][0]) {
+			qos->preemptee_list = list_create(slurm_destroy_char);
+			slurm_addto_char_list(qos->preemptee_list,
+					      row[QOS_REQ_PREE]+1);
+		} 
+
+		if(row[QOS_REQ_PREO] && row[QOS_REQ_PREO][0]) {
+			qos->preemptor_list = list_create(slurm_destroy_char);
+			slurm_addto_char_list(qos->preemptor_list,
+					      row[QOS_REQ_PREO]+1);
+		}
+
+		if(row[QOS_REQ_PRIO])
+			qos->priority = atoi(row[QOS_REQ_PRIO]);
 	}
 	mysql_free_result(result);
 
@@ -6051,6 +7299,8 @@ extern List acct_storage_p_get_txn(mysql_conn_t *mysql_conn, uid_t uid,
 {
 #ifdef HAVE_MYSQL
 	char *query = NULL;	
+	char *assoc_extra = NULL;	
+	char *name_extra = NULL;	
 	char *extra = NULL;	
 	char *tmp = NULL;	
 	List txn_list = NULL;
@@ -6086,6 +7336,138 @@ extern List acct_storage_p_get_txn(mysql_conn_t *mysql_conn, uid_t uid,
 	if(!txn_cond) 
 		goto empty;
 
+	/* handle query for associations first */
+	if(txn_cond->acct_list && list_count(txn_cond->acct_list)) {
+		set = 0;
+		if(assoc_extra)
+			xstrcat(assoc_extra, " && (");
+		else
+			xstrcat(assoc_extra, " where (");
+
+		if(name_extra)
+			xstrcat(name_extra, " && (");
+		else	
+			xstrcat(name_extra, " (");
+		itr = list_iterator_create(txn_cond->acct_list);
+		while((object = list_next(itr))) {
+			if(set) {
+				xstrcat(assoc_extra, " || ");
+				xstrcat(name_extra, " || ");
+			}
+
+			xstrfmtcat(assoc_extra, "acct=\"%s\"", object);
+
+			xstrfmtcat(name_extra, "(name like \"%%\\\"%s\\\"%%\""
+				   " || name=\"%s\")", object, object);
+
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(assoc_extra, ")");
+		xstrcat(name_extra, ")");		
+	}
+
+	if(txn_cond->cluster_list && list_count(txn_cond->cluster_list)) {
+		set = 0;
+		if(assoc_extra)
+			xstrcat(assoc_extra, " && (");
+		else
+			xstrcat(assoc_extra, " where (");
+
+		if(name_extra)
+			xstrcat(name_extra, " && (");
+		else	
+			xstrcat(name_extra, "(");
+			
+		itr = list_iterator_create(txn_cond->cluster_list);
+		while((object = list_next(itr))) {
+			if(set) { 
+				xstrcat(assoc_extra, " || ");
+				xstrcat(name_extra, " || ");
+			}
+			xstrfmtcat(assoc_extra, "cluster=\"%s\"", object);
+
+			xstrfmtcat(name_extra, "(name like \"%%\\\"%s\\\"%%\""
+				   " || name=\"%s\")", object, object);
+
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(assoc_extra, ")");
+		xstrcat(name_extra, ")");		
+	}
+
+	if(txn_cond->user_list && list_count(txn_cond->user_list)) {
+		set = 0;
+		if(assoc_extra) 
+			xstrcat(assoc_extra, " && (");
+		else
+			xstrcat(assoc_extra, " where (");
+
+		if(name_extra)
+			xstrcat(name_extra, " && (");
+		else	
+			xstrcat(name_extra, "(");
+			
+		itr = list_iterator_create(txn_cond->user_list);
+		while((object = list_next(itr))) {
+			if(set) {
+				xstrcat(assoc_extra, " || ");
+				xstrcat(name_extra, " || ");
+			}
+			xstrfmtcat(assoc_extra, "user=\"%s\"", object);
+
+			xstrfmtcat(name_extra, "(name like \"%%\\\"%s\\\"%%\""
+				   " || name=\"%s\")", object, object);
+
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(assoc_extra, ")");
+		xstrcat(name_extra, ")");		
+	}
+
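+	/* Resolve the acct/cluster/user filters to association ids and
+	 * match those ids (or the bare names) against the where_query
+	 * text stored with each transaction. */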
+	if(assoc_extra) {
+		query = xstrdup_printf("select id from %s%s",
+				       assoc_table, assoc_extra);
+		xfree(assoc_extra);
+
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+		if(!(result = mysql_db_query_ret(
+			     mysql_conn->db_conn, query, 0))) {
+			xfree(query);
+			return NULL;
+		}
+		xfree(query);
+		
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " where (");
+
+		set = 0;
+	
+		if(name_extra) {
+			xstrfmtcat(extra, "(%s) || (", name_extra);
+			xfree(name_extra);
+		} else 
+			xstrcat(extra, "(");			
+		
+		while((row = mysql_fetch_row(result))) {
+			if(set) 
+				xstrcat(extra, " || ");
+						
+			xstrfmtcat(extra, "(name like '%%id=%s %%' "
+				   "|| name like '%%id=%s)')", row[0], row[0]);
+			set = 1;
+		}
+		mysql_free_result(result);
+		if(set)
+			xstrcat(extra, "))");
+	}
+	
+	/*******************************************/
+
 	if(txn_cond->action_list && list_count(txn_cond->action_list)) {
 		set = 0;
 		if(extra)
@@ -6096,7 +7478,7 @@ extern List acct_storage_p_get_txn(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "action='%s'", object);
+			xstrfmtcat(extra, "action=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -6113,7 +7495,7 @@ extern List acct_storage_p_get_txn(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "actor='%s'", object);
+			xstrfmtcat(extra, "actor=\"%s\"", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -6147,6 +7529,40 @@ extern List acct_storage_p_get_txn(mysql_conn_t *mysql_conn, uid_t uid,
 		xstrcat(extra, ")");
 	}
 
+	if(txn_cond->info_list && list_count(txn_cond->info_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " where (");
+		itr = list_iterator_create(txn_cond->info_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "info like '%%%s%%'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+	
+	if(txn_cond->name_list && list_count(txn_cond->name_list)) {
+		set = 0;
+		if(extra)
+			xstrcat(extra, " && (");
+		else
+			xstrcat(extra, " where (");
+		itr = list_iterator_create(txn_cond->name_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "name like '%%%s%%'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
 	if(txn_cond->time_start && txn_cond->time_end) {
 		if(extra)
 			xstrcat(extra, " && (");
@@ -6168,6 +7584,14 @@ extern List acct_storage_p_get_txn(mysql_conn_t *mysql_conn, uid_t uid,
 			xstrcat(extra, " where (");
 		xstrfmtcat(extra, "timestamp < %d)", txn_cond->time_end);
 	}
+
+	/* make sure we can get the max length out of the database
+	 * when grouping the names
+	 */
+	if(txn_cond->with_assoc_info) 
+		mysql_db_query(mysql_conn->db_conn, 
+			       "set session group_concat_max_len=65536;");
+			
 empty:
 	xfree(tmp);
 	xstrfmtcat(tmp, "%s", txn_req_inx[i]);
@@ -6206,6 +7630,40 @@ empty:
 		txn->set_info = xstrdup(row[TXN_REQ_INFO]);
 		txn->timestamp = atoi(row[TXN_REQ_TS]);
 		txn->where_query = xstrdup(row[TXN_REQ_NAME]);
+
+		if(txn_cond && txn_cond->with_assoc_info
+		   && (txn->action == DBD_ADD_ASSOCS
+		       || txn->action == DBD_MODIFY_ASSOCS
+		       || txn->action == DBD_REMOVE_ASSOCS)) {
+			MYSQL_RES *result2 = NULL;
+			MYSQL_ROW row2;
+			
+			query = xstrdup_printf(
+				"select "
+				"group_concat(distinct user order by user), "
+				"group_concat(distinct acct order by acct), "
+				"group_concat(distinct cluster "
+				"order by cluster) from %s where %s",
+				assoc_table, row[TXN_REQ_NAME]);
+			debug4("%d(%d) query\n%s", mysql_conn->conn, 
+			       __LINE__, query);
+			if(!(result2 = mysql_db_query_ret(
+				     mysql_conn->db_conn, query, 0))) {
+				xfree(query);
+				continue;
+			}
+			xfree(query);
+
+			if((row2 = mysql_fetch_row(result2))) {
+				if(row2[0] && row2[0][0])
+					txn->users = xstrdup(row2[0]);
+				if(row2[1] && row2[1][0])
+					txn->accts = xstrdup(row2[1]);
+				if(row2[2] && row2[2][0])
+					txn->clusters = xstrdup(row2[2]);
+			}
+			mysql_free_result(result2);			
+		}
 	}
 	mysql_free_result(result);
 
@@ -6392,7 +7850,7 @@ is_user:
 		tmp, my_usage_table, assoc_table, assoc_table, end, start,
 		acct_assoc->id);
 	xfree(tmp);
-	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+	debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	if(!(result = mysql_db_query_ret(
 		     mysql_conn->db_conn, query, 0))) {
 		xfree(query);
@@ -6559,6 +8017,10 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 /* 	info("hour start %s", ctime(&start_time)); */
 /* 	info("hour end %s", ctime(&end_time)); */
 /* 	info("diff is %d", end_time-start_time); */
+	
+	slurm_mutex_lock(&rollup_lock);
+	global_last_rollup = end_time;
+	slurm_mutex_unlock(&rollup_lock);
 
 	if(end_time-start_time > 0) {
 		START_TIMER;
@@ -6691,8 +8153,8 @@ extern int clusteracct_storage_p_node_down(mysql_conn_t *mysql_conn,
 	debug2("inserting %s(%s) with %u cpus", node_ptr->name, cluster, cpus);
 
 	query = xstrdup_printf(
-		"update %s set period_end=%d where cluster='%s' "
-		"and period_end=0 and node_name='%s';",
+		"update %s set period_end=%d where cluster=\"%s\" "
+		"and period_end=0 and node_name=\"%s\";",
 		event_table, event_time, cluster, node_ptr->name);
 	/* If you are clean-restarting the controller over and over again you
 	 * could get records that are duplicates in the database.  If
@@ -6705,10 +8167,11 @@ extern int clusteracct_storage_p_node_down(mysql_conn_t *mysql_conn,
 	xstrfmtcat(query,
 		   "insert into %s "
 		   "(node_name, cluster, cpu_count, period_start, reason) "
-		   "values ('%s', '%s', %u, %d, '%s') on duplicate key "
+		   "values (\"%s\", \"%s\", %u, %d, \"%s\") on duplicate key "
 		   "update period_end=0;",
 		   event_table, node_ptr->name, cluster, 
 		   cpus, event_time, my_reason);
+	debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
 
@@ -6730,9 +8193,10 @@ extern int clusteracct_storage_p_node_up(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 
 	query = xstrdup_printf(
-		"update %s set period_end=%d where cluster='%s' "
-		"and period_end=0 and node_name='%s';",
+		"update %s set period_end=%d where cluster=\"%s\" "
+		"and period_end=0 and node_name=\"%s\";",
 		event_table, event_time, cluster, node_ptr->name);
+	debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
 	return rc;
@@ -6755,6 +8219,7 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 #ifdef HAVE_MYSQL
 	char* query;
 	int rc = SLURM_SUCCESS;
+	int first = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 
@@ -6763,8 +8228,8 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 
 	/* Record the processor count */
 	query = xstrdup_printf(
-		"select cpu_count from %s where cluster='%s' "
-		"and period_end=0 and node_name=''",
+		"select cpu_count from %s where cluster=\"%s\" "
+		"and period_end=0 and node_name='' limit 1",
 		event_table, cluster);
 	if(!(result = mysql_db_query_ret(
 		     mysql_conn->db_conn, query, 0))) {
@@ -6777,6 +8242,21 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 	if(!(row = mysql_fetch_row(result))) {
 		debug("We don't have an entry for this machine %s "
 		      "most likely a first time running.", cluster);
+
+		/* Get all nodes in a down state and jobs pending or running.
+		 * This is for the first time a cluster registers
+		 *
+		 * This only happens here when calling the plugin directly.  If
+		 * calling this plugin through the slurmdbd we do this in
+		 * acct_storage_p_modify_clusters.
+		 */
+		if(!slurmdbd_conf) {
+			/* We will return ACCOUNTING_FIRST_REG so this
+			   is taken care of since the message thread
+			   may not be up when we run this in the controller.
+			*/
+			first = 1;
+		}
 		goto add_it;
 	}
 
@@ -6788,7 +8268,7 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 	debug("%s has changed from %s cpus to %u", cluster, row[0], procs);   
 
 	query = xstrdup_printf(
-		"update %s set period_end=%d where cluster='%s' "
+		"update %s set period_end=%d where cluster=\"%s\" "
 		"and period_end=0 and node_name=''",
 		event_table, event_time, cluster);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
@@ -6798,13 +8278,15 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 add_it:
 	query = xstrdup_printf(
 		"insert into %s (cluster, cpu_count, period_start, reason) "
-		"values ('%s', %u, %d, 'Cluster processor count')",
+		"values (\"%s\", %u, %d, 'Cluster processor count')",
 		event_table, cluster, procs, event_time);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
-
 end_it:
 	mysql_free_result(result);
+	if(first && rc == SLURM_SUCCESS)
+		rc = ACCOUNTING_FIRST_REG;
+
 	return rc;
 #else
 	return SLURM_ERROR;
@@ -6920,11 +8402,11 @@ extern int clusteracct_storage_p_get_usage(
 
 	query = xstrdup_printf(
 		"select %s from %s where (period_start < %d "
-		"&& period_start >= %d) and cluster='%s'",
+		"&& period_start >= %d) and cluster=\"%s\"",
 		tmp, my_usage_table, end, start, cluster_rec->name);
 
 	xfree(tmp);
-	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+	debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	if(!(result = mysql_db_query_ret(
 		     mysql_conn->db_conn, query, 0))) {
 		xfree(query);
@@ -6960,16 +8442,18 @@ extern int clusteracct_storage_p_get_usage(
  * load into the storage the start of a job
  */
 extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn, 
+				       char *cluster_name,
 				       struct job_record *job_ptr)
 {
 #ifdef HAVE_MYSQL
 	int	rc=SLURM_SUCCESS;
-	char	*jname, *nodes;
+	char	*jname = NULL, *nodes = NULL;
 	long	priority;
 	int track_steps = 0;
 	char *block_id = NULL;
 	char *query = NULL;
 	int reinit = 0;
+	time_t check_time = job_ptr->start_time;
 
 	if (!job_ptr->details || !job_ptr->details->submit_time) {
 		error("jobacct_storage_p_job_start: "
@@ -6981,6 +8465,25 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	
 	debug2("mysql_jobacct_job_start() called");
+	if(!check_time)
+		check_time = job_ptr->details->submit_time;
+ 
+	slurm_mutex_lock(&rollup_lock);
+	if(check_time < global_last_rollup) {
+		global_last_rollup = check_time;
+		slurm_mutex_unlock(&rollup_lock);
+		
+		query = xstrdup_printf("update %s set hourly_rollup=%d, "
+				       "daily_rollup=%d, monthly_rollup=%d",
+				       last_ran_table, check_time,
+				       check_time, check_time);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+		rc = mysql_db_query(mysql_conn->db_conn, query);
+		xfree(query);
+	} else
+		slurm_mutex_unlock(&rollup_lock);
+
+
 	priority = (job_ptr->priority == NO_VAL) ?
 		-1L : (long) job_ptr->priority;
 
@@ -7001,7 +8504,7 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 	if (job_ptr->nodes && job_ptr->nodes[0])
 		nodes = job_ptr->nodes;
 	else
-		nodes = "(null)";
+		nodes = "None assigned";
 
 	if(job_ptr->batch_flag)
 		track_steps = 1;
@@ -7017,6 +8520,7 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 	job_ptr->requid = -1; /* force to -1 for sacct to know this
 			       * hasn't been set yet */
 	
+
 	/* We need to put a 0 for 'end' incase of funky job state
 	 * files from a hot start of the controllers we call
 	 * job_start on jobs we may still know about after
@@ -7026,39 +8530,55 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 	if(!job_ptr->db_index) {
 		query = xstrdup_printf(
 			"insert into %s "
-			"(jobid, account, associd, uid, gid, partition, ",
+			"(jobid, associd, uid, gid, nodelist, ",
 			job_table);
 
+		if(cluster_name) 
+			xstrcat(query, "cluster, ");
+		if(job_ptr->account) 
+			xstrcat(query, "account, ");
+		if(job_ptr->partition) 
+			xstrcat(query, "partition, ");
 		if(block_id) 
 			xstrcat(query, "blockid, ");
 		
 		xstrfmtcat(query, 
 			   "eligible, submit, start, name, track_steps, "
-			   "state, priority, req_cpus, alloc_cpus, nodelist) "
-			   "values (%u, '%s', %u, %u, %u, '%s', ",
-			   job_ptr->job_id, job_ptr->account, 
-			   job_ptr->assoc_id,
-			   job_ptr->user_id, job_ptr->group_id,
-			   job_ptr->partition);
+			   "state, priority, req_cpus, alloc_cpus) "
+			   "values (%u, %u, %u, %u, \"%s\", ",
+			   job_ptr->job_id, job_ptr->assoc_id,
+			   job_ptr->user_id, job_ptr->group_id, nodes);
 		
+		if(cluster_name) 
+			xstrfmtcat(query, "\"%s\", ", cluster_name);
+		if(job_ptr->account) 
+			xstrfmtcat(query, "\"%s\", ", job_ptr->account);
+		if(job_ptr->partition) 
+			xstrfmtcat(query, "\"%s\", ", job_ptr->partition);
 		if(block_id) 
-			xstrfmtcat(query, "'%s', ", block_id);
+			xstrfmtcat(query, "\"%s\", ", block_id);
 		
 		xstrfmtcat(query, 
-			   "%d, %d, %d, '%s', %u, %u, %u, %u, %u, '%s') "
+			   "%d, %d, %d, \"%s\", %u, %u, %u, %u, %u) "
 			   "on duplicate key update "
-			   "id=LAST_INSERT_ID(id), end=0, state=%u, "
-			   "partition ='%s', account='%s', associd=%u",
+			   "id=LAST_INSERT_ID(id), state=%u, associd=%u",
 			   (int)job_ptr->details->begin_time,
 			   (int)job_ptr->details->submit_time,
 			   (int)job_ptr->start_time,
 			   jname, track_steps,
 			   job_ptr->job_state & (~JOB_COMPLETING),
 			   priority, job_ptr->num_procs,
-			   job_ptr->total_procs, nodes,
+			   job_ptr->total_procs, 
 			   job_ptr->job_state & (~JOB_COMPLETING),
-			   job_ptr->partition, job_ptr->account, 
 			   job_ptr->assoc_id);
+
+		if(job_ptr->account) 
+			xstrfmtcat(query, ", account=\"%s\"", job_ptr->account);
+		if(job_ptr->partition) 
+			xstrfmtcat(query, ", partition=\"%s\"",
+				   job_ptr->partition);
+		if(block_id)
+			xstrfmtcat(query, ", blockid=\"%s\"", block_id);
 		
 		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	try_again:
@@ -7078,18 +8598,23 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 				rc = SLURM_ERROR;
 		}
 	} else {
-		query = xstrdup_printf(
-			"update %s set partition='%s', ",
-			job_table, job_ptr->partition);
+		query = xstrdup_printf("update %s set nodelist=\"%s\", ", 
+				       job_table, nodes);
+
+		if(job_ptr->account) 
+			xstrfmtcat(query, "account=\"%s\", ",
+				   job_ptr->account);
+		if(job_ptr->partition) 
+			xstrfmtcat(query, "partition=\"%s\", ",
+				   job_ptr->partition);
 		if(block_id)
-			xstrfmtcat(query, "blockid='%s', ", block_id);
-		xstrfmtcat(query, "start=%d, name='%s', state=%u, "
-			   "alloc_cpus=%u, nodelist='%s', "
-			   "account='%s', associd=%u, end=0 where id=%d",
+			xstrfmtcat(query, "blockid=\"%s\", ", block_id);
+
+		xstrfmtcat(query, "start=%d, name=\"%s\", state=%u, "
+			   "alloc_cpus=%u, associd=%d where id=%d",
 			   (int)job_ptr->start_time,
 			   jname, job_ptr->job_state & (~JOB_COMPLETING),
-			   job_ptr->total_procs, nodes, 
-			   job_ptr->account, job_ptr->assoc_id,
+			   job_ptr->total_procs, job_ptr->assoc_id,
 			   job_ptr->db_index);
 		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		rc = mysql_db_query(mysql_conn->db_conn, query);
@@ -7115,7 +8640,7 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 #ifdef HAVE_MYSQL
 	char *query = NULL, *nodes = NULL;
 	int rc=SLURM_SUCCESS;
-	
+
 	if (!job_ptr->db_index 
 	    && (!job_ptr->details || !job_ptr->details->submit_time)) {
 		error("jobacct_storage_p_job_complete: "
@@ -7126,15 +8651,34 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
 	debug2("mysql_jobacct_job_complete() called");
+	
+	/* If we get an error with this just fall through to avoid an
+	 * infinite loop
+	 */
 	if (job_ptr->end_time == 0) {
 		debug("mysql_jobacct: job %u never started", job_ptr->job_id);
-		return SLURM_ERROR;
+		return SLURM_SUCCESS;
 	}	
 	
+	slurm_mutex_lock(&rollup_lock);
+	if(job_ptr->end_time < global_last_rollup) {
+		global_last_rollup = job_ptr->end_time;
+		slurm_mutex_unlock(&rollup_lock);
+		
+		query = xstrdup_printf("update %s set hourly_rollup=%d, "
+				       "daily_rollup=%d, monthly_rollup=%d",
+				       last_ran_table, job_ptr->end_time,
+				       job_ptr->end_time, job_ptr->end_time);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+		rc = mysql_db_query(mysql_conn->db_conn, query);
+		xfree(query);
+	} else
+		slurm_mutex_unlock(&rollup_lock);
+
 	if (job_ptr->nodes && job_ptr->nodes[0])
 		nodes = job_ptr->nodes;
 	else
-		nodes = "(null)";
+		nodes = "None assigned";
 
 	if(!job_ptr->db_index) {
 		if(!(job_ptr->db_index =
@@ -7145,19 +8689,18 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 			/* If we get an error with this just fall
 			 * through to avoid an infinite loop
 			 */
-			if(jobacct_storage_p_job_start(mysql_conn, job_ptr)
-			   == SLURM_ERROR) {
+			if(jobacct_storage_p_job_start(
+				   mysql_conn, NULL, job_ptr) == SLURM_ERROR) {
 				error("couldn't add job %u at job completion",
 				      job_ptr->job_id);
 				return SLURM_SUCCESS;
 			}
-			jobacct_storage_p_job_start(mysql_conn, job_ptr);
 		}
 	}
 
-	query = xstrdup_printf("update %s set start=%u, end=%u, state=%d, "
-			       "nodelist='%s', comp_code=%u, "
-			       "kill_requid=%u where id=%u",
+	query = xstrdup_printf("update %s set start=%d, end=%d, state=%d, "
+			       "nodelist=\"%s\", comp_code=%d, "
+			       "kill_requid=%d where id=%d",
 			       job_table, (int)job_ptr->start_time,
 			       (int)job_ptr->end_time, 
 			       job_ptr->job_state & (~JOB_COMPLETING),
@@ -7241,8 +8784,8 @@ extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn,
 			/* If we get an error with this just fall
 			 * through to avoid an infinite loop
 			 */
-			if(jobacct_storage_p_job_start(mysql_conn,
-						       step_ptr->job_ptr)
+			if(jobacct_storage_p_job_start(
+				   mysql_conn, NULL, step_ptr->job_ptr)
 			   == SLURM_ERROR) {
 				error("couldn't add job %u at step start",
 				      step_ptr->job_ptr->job_id);
@@ -7255,8 +8798,8 @@ extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn,
 	query = xstrdup_printf(
 		"insert into %s (id, stepid, start, name, state, "
 		"cpus, nodelist) "
-		"values (%d, %u, %d, '%s', %d, %u, '%s') "
-		"on duplicate key update cpus=%u, end=0, state=%u",
+		"values (%d, %d, %d, \"%s\", %d, %d, \"%s\") "
+		"on duplicate key update cpus=%d, end=0, state=%d",
 		step_table, step_ptr->job_ptr->db_index,
 		step_ptr->step_id, 
 		(int)step_ptr->start_time, step_ptr->name,
@@ -7288,7 +8831,8 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 	float ave_cpu = 0, ave_cpu2 = 0;
 	char *query = NULL;
 	int rc =SLURM_SUCCESS;
-	
+	uint32_t exit_code = 0;
+
 	if (!step_ptr->job_ptr->db_index 
 	    && (!step_ptr->job_ptr->details
 		|| !step_ptr->job_ptr->details->submit_time)) {
@@ -7325,7 +8869,12 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 	
 	if ((elapsed=now-step_ptr->start_time)<0)
 		elapsed=0;	/* For *very* short jobs, if clock is wrong */
-	if (step_ptr->exit_code)
+	
+	exit_code = step_ptr->exit_code;
+	if (exit_code == NO_VAL) {
+		comp_status = JOB_CANCELLED;
+		exit_code = 0;
+	} else if (exit_code)
 		comp_status = JOB_FAILED;
 	else
 		comp_status = JOB_COMPLETE;
@@ -7357,7 +8906,7 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 			/* If we get an error with this just fall
 			 * through to avoid an infinite loop
 			 */
-			if(jobacct_storage_p_job_start(mysql_conn,
+			if(jobacct_storage_p_job_start(mysql_conn, NULL,
 						       step_ptr->job_ptr)
 			   == SLURM_ERROR) {
 				error("couldn't add job %u "
@@ -7370,7 +8919,7 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 
 	query = xstrdup_printf(
 		"update %s set end=%d, state=%d, "
-		"kill_requid=%u, comp_code=%u, "
+		"kill_requid=%d, comp_code=%d, "
 		"user_sec=%ld, user_usec=%ld, "
 		"sys_sec=%ld, sys_usec=%ld, "
 		"max_vsize=%u, max_vsize_task=%u, "
@@ -7381,11 +8930,11 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 		"max_pages_node=%u, ave_pages=%.2f, "
 		"min_cpu=%.2f, min_cpu_task=%u, "
 		"min_cpu_node=%u, ave_cpu=%.2f "
-		"where id=%u and stepid=%u",
+		"where id=%d and stepid=%u",
 		step_table, (int)now,
 		comp_status,
 		step_ptr->job_ptr->requid, 
-		step_ptr->exit_code,
+		exit_code,
 		/* user seconds */
 		jobacct->user_cpu_sec,	
 		/* user microseconds */
@@ -7443,8 +8992,8 @@ extern int jobacct_storage_p_suspend(mysql_conn_t *mysql_conn,
 			/* If we get an error with this just fall
 			 * through to avoid an infinite loop
 			 */
-			if(jobacct_storage_p_job_start(mysql_conn, job_ptr)
-			   == SLURM_ERROR) {
+			if(jobacct_storage_p_job_start(
+				   mysql_conn, NULL, job_ptr) == SLURM_ERROR) {
 				error("couldn't suspend job %u",
 				      job_ptr->job_id);
 				return SLURM_SUCCESS;
@@ -7457,7 +9006,7 @@ extern int jobacct_storage_p_suspend(mysql_conn_t *mysql_conn,
 
 	xstrfmtcat(query,
 		   "update %s set suspended=%d-suspended, state=%d "
-		   "where id=%u;",
+		   "where id=%d;",
 		   job_table, (int)job_ptr->suspend_time, 
 		   job_ptr->job_state & (~JOB_COMPLETING),
 		   job_ptr->db_index);
@@ -7599,10 +9148,11 @@ extern int acct_storage_p_flush_jobs_on_cluster(
 	/* First we need to get the id's and states so we can clean up
 	 * the suspend table and the step table 
 	 */
-	query = xstrdup_printf("select t1.id, t1.state from %s as t1, %s as t2 "
-			       "where ((t2.id=t1.associd and t2.cluster='%s') "
-			       "|| !t1.associd) && t1.end=0;",
-			       job_table, assoc_table, cluster);
+	query = xstrdup_printf(
+		"select t1.id, t1.state from %s as t1, %s as t2 "
+		"where ((t2.id=t1.associd and t2.cluster=\"%s\") "
+		"|| !t1.associd) && t1.end=0;",
+		job_table, assoc_table, cluster);
 	if(!(result =
 	     mysql_db_query_ret(mysql_conn->db_conn, query, 0))) {
 		xfree(query);
@@ -7649,7 +9199,7 @@ extern int acct_storage_p_flush_jobs_on_cluster(
 	}
 /* 	query = xstrdup_printf("update %s as t1, %s as t2 set " */
 /* 			       "t1.state=%u, t1.end=%u where " */
-/* 			       "t2.id=t1.associd and t2.cluster='%s' " */
+/* 			       "t2.id=t1.associd and t2.cluster=\"%s\" " */
 /* 			       "&& t1.end=0;", */
 /* 			       job_table, assoc_table, JOB_CANCELLED,  */
 /* 			       event_time, cluster); */
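The job_start and job_complete hunks above repeat one pattern: take rollup_lock, rewind global_last_rollup when the incoming timestamp predates the last completed rollup, and push the hourly/daily/monthly watermarks in last_ran_table back so the next rollup pass re-aggregates that window. A minimal sketch of that pattern, factored into a hypothetical helper (only the globals and the xstr/mysql calls come from the patch; the helper name is an assumption):

	static void _rewind_rollup(mysql_conn_t *mysql_conn, time_t check_time)
	{
		char *query = NULL;

		slurm_mutex_lock(&rollup_lock);
		if (check_time >= global_last_rollup) {
			slurm_mutex_unlock(&rollup_lock);
			return;		/* nothing to re-aggregate */
		}
		global_last_rollup = check_time;
		slurm_mutex_unlock(&rollup_lock);

		/* push all three watermarks back so the next rollup run
		 * re-covers the period this out-of-order event falls into */
		query = xstrdup_printf("update %s set hourly_rollup=%d, "
				       "daily_rollup=%d, monthly_rollup=%d",
				       last_ran_table, (int)check_time,
				       (int)check_time, (int)check_time);
		mysql_db_query(mysql_conn->db_conn, query);
		xfree(query);
	}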
diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
index 318daf6dc..9344e7f6a 100644
--- a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
+++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
@@ -78,6 +78,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		"t1.gid",
 		"t1.partition",
 		"t1.blockid",
+		"t1.cluster",
 		"t1.account",
 		"t1.eligible",
 		"t1.submit",
@@ -96,6 +97,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		"t1.qos",
 		"t2.user",
 		"t2.cluster",
+		"t2.acct",
 		"t2.lft"
 	};
 
@@ -142,7 +144,8 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		JOB_REQ_GID,
 		JOB_REQ_PARTITION,
 		JOB_REQ_BLOCKID,
-		JOB_REQ_ACCOUNT,
+		JOB_REQ_CLUSTER1,
+		JOB_REQ_ACCOUNT1,
 		JOB_REQ_ELIGIBLE,
 		JOB_REQ_SUBMIT,
 		JOB_REQ_START,
@@ -160,6 +163,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		JOB_REQ_QOS,
 		JOB_REQ_USER_NAME,
 		JOB_REQ_CLUSTER,
+		JOB_REQ_ACCOUNT,
 		JOB_REQ_LFT,
 		JOB_REQ_COUNT		
 	};
@@ -259,7 +263,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "t1.acct='%s'", object);
+			xstrfmtcat(extra, "t1.account='%s'", object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
@@ -379,17 +383,14 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " || ");
-			xstrfmtcat(extra, "%s.cluster='%s'", 
-				   table_level, object);
+			xstrfmtcat(extra, 
+				   "(t1.cluster='%s' || %s.cluster='%s')", 
+				   object, table_level, object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
-		/* just incase the association is gone */
-		if(set) 
-			xstrcat(extra, " || ");
-		xstrfmtcat(extra, "%s.cluster is null)", table_level);
-	}
-
+		xstrcat(extra, ")");
+	} 
 no_cond:	
 
 	xfree(tmp);
@@ -463,7 +464,7 @@ no_cond:
 	if(job_cond && !job_cond->duplicates) 
 		xstrcat(query, " order by jobid, submit desc");
 
-	debug3("%d query\n%s", mysql_conn->conn, query);
+	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	if(!(result = mysql_db_query_ret(
 		     mysql_conn->db_conn, query, 0))) {
 		xfree(query);
@@ -487,9 +488,11 @@ no_cond:
 		job->alloc_cpus = atoi(row[JOB_REQ_ALLOC_CPUS]);
 		job->associd = atoi(row[JOB_REQ_ASSOCID]);
 
-		if(row[JOB_REQ_CLUSTER])
+		if(row[JOB_REQ_CLUSTER] && row[JOB_REQ_CLUSTER][0])
 			job->cluster = xstrdup(row[JOB_REQ_CLUSTER]);
-
+		else if(row[JOB_REQ_CLUSTER1] && row[JOB_REQ_CLUSTER1][0])
+			job->cluster = xstrdup(row[JOB_REQ_CLUSTER1]);
+			
 		if(row[JOB_REQ_USER_NAME]) 
 			job->user = xstrdup(row[JOB_REQ_USER_NAME]);
 		else 
@@ -498,8 +501,11 @@ no_cond:
 		if(row[JOB_REQ_LFT])
 			job->lft = atoi(row[JOB_REQ_LFT]);
 
-		if(row[JOB_REQ_ACCOUNT])
+		if(row[JOB_REQ_ACCOUNT] && row[JOB_REQ_ACCOUNT][0])
 			job->account = xstrdup(row[JOB_REQ_ACCOUNT]);
+		else if(row[JOB_REQ_ACCOUNT1] && row[JOB_REQ_ACCOUNT1][0])
+			job->account = xstrdup(row[JOB_REQ_ACCOUNT1]);
+
 		if(row[JOB_REQ_BLOCKID])
 			job->blockid = xstrdup(row[JOB_REQ_BLOCKID]);
 
@@ -533,7 +539,8 @@ no_cond:
 					job_cond->usage_start,
 					id);
 				
-				debug4("%d query\n%s", mysql_conn->conn, query);
+				debug4("%d(%d) query\n%s", 
+				       mysql_conn->conn, __LINE__, query);
 				if(!(result2 = mysql_db_query_ret(
 					     mysql_conn->db_conn,
 					     query, 0))) {
@@ -569,11 +576,15 @@ no_cond:
 			}
 		} else {
 			job->suspended = atoi(row[JOB_REQ_SUSPENDED]);
-			if(!job->end) {
+
+			if(!job->start) {
+				job->elapsed = 0;
+			} else if(!job->end) {
 				job->elapsed = now - job->start;
 			} else {
 				job->elapsed = job->end - job->start;
 			}
+
 			job->elapsed -= job->suspended;
 		}
 
@@ -581,8 +592,13 @@ no_cond:
 		job->jobname = xstrdup(row[JOB_REQ_NAME]);
 		job->gid = atoi(row[JOB_REQ_GID]);
 		job->exitcode = atoi(row[JOB_REQ_COMP_CODE]);
-		job->partition = xstrdup(row[JOB_REQ_PARTITION]);
-		job->nodes = xstrdup(row[JOB_REQ_NODELIST]);
+
+		if(row[JOB_REQ_PARTITION])
+			job->partition = xstrdup(row[JOB_REQ_PARTITION]);
+
+		if(row[JOB_REQ_NODELIST])
+			job->nodes = xstrdup(row[JOB_REQ_NODELIST]);
+
 		if (!job->nodes || !strcmp(job->nodes, "(null)")) {
 			xfree(job->nodes);
 			job->nodes = xstrdup("(unknown)");
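With the cluster filter reworked above, the WHERE fragment now matches either the cluster stored on the job row itself (t1.cluster) or the one inherited from the association, instead of relying on the old "cluster is null" escape hatch. For a job_cond naming two clusters (hypothetical names, with table_level left at the association table t2), the generated fragment comes out roughly as:

	&& ((t1.cluster='tux' || t2.cluster='tux')
	 || (t1.cluster='abe' || t2.cluster='abe'))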
diff --git a/src/plugins/accounting_storage/mysql/mysql_rollup.c b/src/plugins/accounting_storage/mysql/mysql_rollup.c
index 4c0166589..669ddeb82 100644
--- a/src/plugins/accounting_storage/mysql/mysql_rollup.c
+++ b/src/plugins/accounting_storage/mysql/mysql_rollup.c
@@ -114,7 +114,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		"t1.id",
 		"jobid",
 		"associd",
-		"cluster",
+		"t2.cluster",
 		"eligible",
 		"start",
 		"end",
@@ -187,7 +187,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				       event_str, event_table,
 				       curr_end, curr_start);
 
-		debug3("%d query\n%s", mysql_conn->conn, query);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		if(!(result = mysql_db_query_ret(
 			     mysql_conn->db_conn, query, 0))) {
 			xfree(query);
@@ -288,7 +288,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				       job_str, job_table, assoc_table,
 				       curr_end, curr_start, curr_start);
 
-		debug3("%d query\n%s", mysql_conn->conn, query);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		if(!(result = mysql_db_query_ret(
 			     mysql_conn->db_conn, query, 0))) {
 			xfree(query);
@@ -342,7 +342,8 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					curr_end, curr_start,
 					row[JOB_REQ_DB_INX]);
 				
-				debug4("%d query\n%s", mysql_conn->conn, query);
+				debug4("%d(%d) query\n%s",
+				       mysql_conn->conn, __LINE__, query);
 				if(!(result2 = mysql_db_query_ret(
 					     mysql_conn->db_conn,
 					     query, 0))) {
@@ -369,8 +370,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					
 					seconds -= (local_end - local_start);
 				}
-				mysql_free_result(result2);			
-
+				mysql_free_result(result2);
 			}
 			if(seconds < 1) {
 				debug4("This job (%u) was suspended "
@@ -451,13 +451,13 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			 * commit field
 			 */
 			
-			if(c_usage->i_cpu < 0) {
+			if((int64_t)c_usage->i_cpu < 0) {
 /* 				info("got %d %d %d", c_usage->r_cpu, */
 /* 				     c_usage->i_cpu, c_usage->o_cpu); */
 				c_usage->r_cpu += c_usage->i_cpu;
 				c_usage->o_cpu -= c_usage->i_cpu;
 				c_usage->i_cpu = 0;
-				if(c_usage->r_cpu < 0)
+				if((int64_t)c_usage->r_cpu < 0)
 					c_usage->r_cpu = 0;
 			}
 			
@@ -510,6 +510,8 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				   "over_cpu_secs=VALUES(over_cpu_secs), "
 				   "resv_cpu_secs=VALUES(resv_cpu_secs)",
 				   now);
+			debug3("%d(%d) query\n%s",
+			       mysql_conn->conn, __LINE__, query);
 			rc = mysql_db_query(mysql_conn->db_conn, query);
 			xfree(query);
 			if(rc != SLURM_SUCCESS) {
@@ -547,7 +549,8 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				   "alloc_cpu_secs=VALUES(alloc_cpu_secs)",
 				   now);
 					   	
-			debug3("%d query\n%s", mysql_conn->conn, query);
+			debug3("%d(%d) query\n%s",
+			       mysql_conn->conn, __LINE__, query);
 			rc = mysql_db_query(mysql_conn->db_conn, query);
 			xfree(query);
 			if(rc != SLURM_SUCCESS) {
@@ -632,7 +635,7 @@ extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
 			   cluster_day_table, now, now, curr_start,
 			   cluster_hour_table,
 			   curr_end, curr_start, now);
-		debug3("%d query\n%s", mysql_conn->conn, query);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		rc = mysql_db_query(mysql_conn->db_conn, query);
 		xfree(query);
 		if(rc != SLURM_SUCCESS) {
@@ -728,7 +731,7 @@ extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
 			   cluster_month_table, now, now, curr_start,
 			   cluster_day_table,
 			   curr_end, curr_start, now);
-		debug3("%d query\n%s", mysql_conn->conn, query);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		rc = mysql_db_query(mysql_conn->db_conn, query);
 		xfree(query);
 		if(rc != SLURM_SUCCESS) {
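The (int64_t) casts added in the hourly rollup matter because the usage counters are unsigned (as the casts imply): subtracting reserved or allocated seconds can underflow them, and an unsigned value compared against < 0 is never true, so the correction guarded by that test was dead code. A minimal sketch of the hazard on a two's-complement target:

	uint64_t i_cpu = 0;

	i_cpu -= 10;			/* underflows and wraps to a huge value */
	if (i_cpu < 0)			/* never true for an unsigned type      */
		i_cpu = 0;		/* correction silently skipped          */
	if ((int64_t)i_cpu < 0)		/* the cast restores the intended test  */
		i_cpu = 0;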
diff --git a/src/plugins/accounting_storage/none/accounting_storage_none.c b/src/plugins/accounting_storage/none/accounting_storage_none.c
index 373cd2203..ab95986c8 100644
--- a/src/plugins/accounting_storage/none/accounting_storage_none.c
+++ b/src/plugins/accounting_storage/none/accounting_storage_none.c
@@ -87,7 +87,8 @@ extern int fini ( void )
 	return SLURM_SUCCESS;
 }
 
-extern void * acct_storage_p_get_connection(bool make_agent, bool rollback)
+extern void * acct_storage_p_get_connection(bool make_agent, int conn_num,
+					    bool rollback)
 {
 	return NULL;
 }
@@ -166,6 +167,13 @@ extern List acct_storage_p_modify_associations(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
+extern List acct_storage_p_modify_qos(void *db_conn, uint32_t uid,
+				      acct_qos_cond_t *qos_cond,
+				      acct_qos_rec_t *qos)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 				       acct_user_cond_t *user_q)
 {
@@ -296,7 +304,7 @@ extern int clusteracct_storage_p_get_usage(
 /* 
  * load into the storage the start of a job
  */
-extern int jobacct_storage_p_job_start(void *db_conn,
+extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 				       struct job_record *job_ptr)
 {
 	return SLURM_SUCCESS;
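The stub above tracks the same interface change made in every storage back end: acct_storage_p_get_connection gains a connection number and jobacct_storage_p_job_start gains the cluster name. A hypothetical call updated for the new signatures, given some struct job_record *job_ptr, for illustration only (the argument values are assumptions, and real callers reach these entry points through the generic accounting-storage wrappers rather than the plugin symbols directly):

	void *db_conn = acct_storage_p_get_connection(false,	/* make_agent */
						      0,	/* conn_num   */
						      true);	/* rollback   */

	jobacct_storage_p_job_start(db_conn, "tux",		/* cluster_name */
				    job_ptr);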
diff --git a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
index 26c6e0535..35c4d74f9 100644
--- a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
+++ b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
@@ -240,6 +240,7 @@ static int _pgsql_acct_check_tables(PGconn *acct_pgsql_db,
 		{ "associd", "bigint not null" },
 		{ "uid", "smallint not null" },
 		{ "gid", "smallint not null" },
+		{ "cluster", "text" },
 		{ "partition", "text not null" },
 		{ "blockid", "text" },
 		{ "account", "text" },
@@ -700,6 +701,22 @@ extern int init ( void )
 
 	first = 0;
 
+	if(slurmdbd_conf) {
+		error("This plugin is not fully compatible with association "
+		      "logic.  Please use the mysql plugin for full "
+		      "compatiablitly.  If you are interested in filling "
+		      "this plugin in please send email to "
+		      "slurm-dev@lists.llnl.gov. Job accounting without "
+		      "associations will continue to work.");
+	} else {
+		error("This plugin is not fully compatible with association "
+		      "logic.  Please use the mysql or slurmdbd/mysql plugin "
+		      "for full compatiablitly.  If you are interested in "
+		      "filling this plugin in please send email to "
+		      "slurm-dev@lists.llnl.gov.  Job accounting without "
+		      "associations will continue to work.");
+	}
+
 #ifdef HAVE_PGSQL
 	pgsql_db_info = _pgsql_acct_create_db_info();		
 
@@ -752,7 +769,8 @@ extern int fini ( void )
 #endif
 }
 
-extern void *acct_storage_p_get_connection(bool make_agent, bool rollback)
+extern void *acct_storage_p_get_connection(bool make_agent, int conn_num,
+					   bool rollback)
 {
 #ifdef HAVE_PGSQL
 	PGconn *acct_pgsql_db = NULL;
@@ -852,6 +870,13 @@ extern List acct_storage_p_modify_associations(
 	return SLURM_SUCCESS;
 }
 
+extern List acct_storage_p_modify_qos(PGconn *acct_pgsql_db, uint32_t uid,
+				      acct_qos_cond_t *qos_cond,
+				      acct_qos_rec_t *qos)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_remove_users(PGconn *acct_pgsql_db, uint32_t uid,
 					acct_user_cond_t *user_cond)
 {
@@ -1098,6 +1123,7 @@ extern int clusteracct_storage_p_get_usage(
  * load into the storage the start of a job
  */
 extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db, 
+				       char *cluster_name,
 				       struct job_record *job_ptr)
 {
 #ifdef HAVE_PGSQL
@@ -1143,7 +1169,7 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 	if (job_ptr->nodes && job_ptr->nodes[0])
 		nodes = job_ptr->nodes;
 	else
-		nodes = "(null)";
+		nodes = "None assigned";
 
 	if(job_ptr->batch_flag)
 		track_steps = 1;
@@ -1161,23 +1187,43 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 	if(!job_ptr->db_index) {
 		query = xstrdup_printf(
 			"insert into %s "
-			"(jobid, account, associd, uid, gid, partition, "
-			"blockid, eligible, submit, start, name, track_steps, "
-			"state, priority, req_cpus, alloc_cpus, nodelist) "
-			"values (%u, '%s', %u, %u, %u, '%s', '%s', "
-			"%d, %d, %d, '%s', %u, "
-			"%u, %u, %u, %u, '%s')",
-			job_table, job_ptr->job_id, job_ptr->account, 
-			job_ptr->assoc_id,
-			job_ptr->user_id, job_ptr->group_id,
-			job_ptr->partition, block_id,
-			(int)job_ptr->details->begin_time,
-			(int)job_ptr->details->submit_time,
-			(int)job_ptr->start_time,
-			jname, track_steps,
-			job_ptr->job_state & (~JOB_COMPLETING),
-			priority, job_ptr->num_procs,
-			job_ptr->total_procs, nodes);
+			"(jobid, associd, uid, gid, nodelist, ",
+			job_table);
+
+		if(cluster_name) 
+			xstrcat(query, "cluster, ");
+		if(job_ptr->account) 
+			xstrcat(query, "account, ");
+		if(job_ptr->partition) 
+			xstrcat(query, "partition, ");
+		if(block_id) 
+			xstrcat(query, "blockid, ");
+		
+		xstrfmtcat(query, 
+			   "eligible, submit, start, name, track_steps, "
+			   "state, priority, req_cpus, alloc_cpus) "
+			   "values (%u, %u, %u, %u, '%s', ",
+			   job_ptr->job_id, job_ptr->assoc_id,
+			   job_ptr->user_id, job_ptr->group_id, nodes);
+		
+		if(cluster_name) 
+			xstrfmtcat(query, "'%s', ", cluster_name);
+		if(job_ptr->account) 
+			xstrfmtcat(query, "'%s', ", job_ptr->account);
+		if(job_ptr->partition) 
+			xstrfmtcat(query, "'%s', ", job_ptr->partition);
+		if(block_id) 
+			xstrfmtcat(query, "'%s', ", block_id);
+		
+		xstrfmtcat(query, 
+			   "%d, %d, %d, '%s', %u, %u, %u, %u, %u)",
+			   (int)job_ptr->details->begin_time,
+			   (int)job_ptr->details->submit_time,
+			   (int)job_ptr->start_time,
+			   jname, track_steps,
+			   job_ptr->job_state & (~JOB_COMPLETING),
+			   priority, job_ptr->num_procs,
+			   job_ptr->total_procs);
 	try_again:
 		if(!(job_ptr->db_index = pgsql_insert_ret_id(acct_pgsql_db,  
 							     "job_table_id_seq",
@@ -1195,16 +1241,24 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 				rc = SLURM_ERROR;
 		}
 	} else {
-		query = xstrdup_printf(
-			"update %s set partition='%s', blockid='%s', start=%d, "
-			"name='%s', state=%u, alloc_cpus=%u, nodelist='%s', "
-			"account='%s', end=0 where id=%d",
-			job_table, job_ptr->partition, block_id,
-			(int)job_ptr->start_time,
-			jname, 
-			job_ptr->job_state & (~JOB_COMPLETING),
-			job_ptr->total_procs, nodes,
-			job_ptr->account, job_ptr->db_index);
+		query = xstrdup_printf("update %s set nodelist='%s', ", 
+				       job_table, nodes);
+
+		if(job_ptr->account) 
+			xstrfmtcat(query, "account='%s', ",
+				   job_ptr->account);
+		if(job_ptr->partition) 
+			xstrfmtcat(query, "partition='%s', ",
+				   job_ptr->partition);
+		if(block_id)
+			xstrfmtcat(query, "blockid='%s', ", block_id);
+
+		xstrfmtcat(query, "start=%d, name='%s', state=%u, "
+			   "alloc_cpus=%u, associd=%d where id=%d",
+			   (int)job_ptr->start_time,
+			   jname, job_ptr->job_state & (~JOB_COMPLETING),
+			   job_ptr->total_procs, job_ptr->assoc_id,
+			   job_ptr->db_index);
 		rc = pgsql_db_query(acct_pgsql_db, query);
 	}
 	xfree(block_id);
@@ -1250,19 +1304,29 @@ extern int jobacct_storage_p_job_complete(PGconn *acct_pgsql_db,
 	if (job_ptr->nodes && job_ptr->nodes[0])
 		nodes = job_ptr->nodes;
 	else
-		nodes = "(null)";
-
+		nodes = "None assigned";
+	
 	if(!job_ptr->db_index) {
-		job_ptr->db_index = _get_db_index(acct_pgsql_db,
-						  job_ptr->details->submit_time,
-						  job_ptr->job_id,
-						  job_ptr->assoc_id);
-		if(job_ptr->db_index == -1) 
-			return SLURM_ERROR;
+		if(!(job_ptr->db_index =
+		     _get_db_index(acct_pgsql_db,
+				   job_ptr->details->submit_time,
+				   job_ptr->job_id,
+				   job_ptr->assoc_id))) {
+			/* If we get an error with this just fall
+			 * through to avoid an infinite loop
+			 */
+			if(jobacct_storage_p_job_start(
+				   acct_pgsql_db, NULL, job_ptr)
+			   == SLURM_ERROR) {
+				error("couldn't add job %u at job completion",
+				      job_ptr->job_id);
+				return SLURM_SUCCESS;
+			}
+		}
 	}
-	query = xstrdup_printf("update %s set start=%u, endtime=%u, state=%d, "
-			       "nodelist='%s', comp_code=%u, "
-			       "kill_requid=%u where id=%u",
+	query = xstrdup_printf("update %s set start=%d, endtime=%d, state=%d, "
+			       "nodelist='%s', comp_code=%d, "
+			       "kill_requid=%d where id=%d",
 			       job_table, (int)job_ptr->start_time,
 			       (int)job_ptr->end_time, 
 			       job_ptr->job_state & (~JOB_COMPLETING),
@@ -1385,7 +1449,8 @@ extern int jobacct_storage_p_step_complete(PGconn *acct_pgsql_db,
 	float ave_cpu = 0, ave_cpu2 = 0;
 	char *query = NULL;
 	int rc =SLURM_SUCCESS;
-	
+	uint32_t exit_code;
+
 	if (!step_ptr->job_ptr->db_index 
 	    && (!step_ptr->job_ptr->details
 		|| !step_ptr->job_ptr->details->submit_time)) {
@@ -1425,7 +1490,12 @@ extern int jobacct_storage_p_step_complete(PGconn *acct_pgsql_db,
 
 	if ((elapsed=now-step_ptr->start_time)<0)
 		elapsed=0;	/* For *very* short jobs, if clock is wrong */
-	if (step_ptr->exit_code)
+
+	exit_code = step_ptr->exit_code;
+	if (exit_code == NO_VAL) {
+		comp_status = JOB_CANCELLED;
+		exit_code = 0;
+	} else if (exit_code)
 		comp_status = JOB_FAILED;
 	else
 		comp_status = JOB_COMPLETE;
@@ -1475,7 +1545,7 @@ extern int jobacct_storage_p_step_complete(PGconn *acct_pgsql_db,
 		step_table, (int)now,
 		comp_status,
 		step_ptr->job_ptr->requid, 
-		step_ptr->exit_code, 
+		exit_code, 
 		/* user seconds */
 		jobacct->user_cpu_sec,	
 		/* user microseconds */
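Both the mysql and pgsql step-completion paths now translate a step exit code of NO_VAL into a cancelled state with a zero exit code before the row is written, rather than storing NO_VAL verbatim. Consolidated with comments, the mapping added above is:

	uint32_t exit_code = step_ptr->exit_code;

	if (exit_code == NO_VAL) {
		comp_status = JOB_CANCELLED;	/* step never reported a code */
		exit_code = 0;			/* avoid recording NO_VAL     */
	} else if (exit_code)
		comp_status = JOB_FAILED;	/* non-zero exit code         */
	else
		comp_status = JOB_COMPLETE;	/* clean exit                 */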
diff --git a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
index a534e4d80..7b3bc6ab7 100644
--- a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
+++ b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
@@ -74,6 +74,7 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 		"t1.gid",
 		"t1.partition",
 		"t1.blockid",
+		"t1.cluster",
 		"t1.account",
 		"t1.eligible",
 		"t1.submit",
@@ -138,6 +139,7 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 		JOB_REQ_GID,
 		JOB_REQ_PARTITION,
 		JOB_REQ_BLOCKID,
+		JOB_REQ_CLUSTER1,
 		JOB_REQ_ACCOUNT,
 		JOB_REQ_ELIGIBLE,
 		JOB_REQ_SUBMIT,
@@ -212,7 +214,7 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 		table_level="t3";
 		/* just incase the association is gone */
 		if(set) 
-			xstrcat(extra, " || ");
+			xstrcat(extra, " or ");
 		xstrfmtcat(extra, "t3.id is null) and "
 			   "(t2.lft between t3.lft and t3.rgt "
 			   "or t2.lft is null)");
@@ -255,14 +257,14 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 	if(job_cond->userid_list && list_count(job_cond->userid_list)) {
 		set = 0;
 		if(extra)
-			xstrcat(extra, " && (");
+			xstrcat(extra, " and (");
 		else
 			xstrcat(extra, " where (");
 
 		itr = list_iterator_create(job_cond->userid_list);
 		while((object = list_next(itr))) {
 			if(set) 
-				xstrcat(extra, " || ");
+				xstrcat(extra, " or ");
 			xstrfmtcat(extra, "t1.uid='%s'", object);
 			set = 1;
 		}
@@ -321,14 +323,14 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 	if(job_cond->state_list && list_count(job_cond->state_list)) {
 		set = 0;
 		if(extra)
-			xstrcat(extra, " && (");
+			xstrcat(extra, " and (");
 		else
 			xstrcat(extra, " where (");
 
 		itr = list_iterator_create(job_cond->state_list);
 		while((object = list_next(itr))) {
 			if(set) 
-				xstrcat(extra, " || ");
+				xstrcat(extra, " or ");
 			xstrfmtcat(extra, "t1.state='%s'", object);
 			set = 1;
 		}
@@ -348,15 +350,12 @@ extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db,
 		while((object = list_next(itr))) {
 			if(set) 
 				xstrcat(extra, " or ");
-			xstrfmtcat(extra, "%s.cluster='%s'", 
-				   table_level, object);
+			xstrfmtcat(extra,
+				   "(t1.cluster='%s' or %s.cluster='%s')", 
+				   object, table_level, object);
 			set = 1;
 		}
 		list_iterator_destroy(itr);
-		/* just incase the association is gone */
-		if(set) 
-			xstrcat(extra, " or ");
-		xstrfmtcat(extra, "%s.cluster is null)", table_level);
 	}
 
 no_cond:	
@@ -405,9 +404,13 @@ no_cond:
 						  JOB_REQ_ALLOC_CPUS));
 		job->associd = atoi(PQgetvalue(result, i, JOB_REQ_ASSOCID));
 		job->cluster = xstrdup(PQgetvalue(result, i, JOB_REQ_CLUSTER));
-		if(job->cluster && !job->cluster[0]) 
+		if(job->cluster && !job->cluster[0]) {
 			xfree(job->cluster);
-
+			job->cluster = xstrdup(
+				PQgetvalue(result, i, JOB_REQ_CLUSTER1));
+			if(job->cluster && !job->cluster[0]) 
+				xfree(job->cluster);
+		}
 		job->user =  xstrdup(PQgetvalue(result, i, JOB_REQ_USER_NAME));
 		if(!job->user || !job->user[0]) 
 			job->uid = atoi(PQgetvalue(result, i, JOB_REQ_UID));
@@ -455,8 +458,8 @@ no_cond:
 				/* get the suspended time for this job */
 				query = xstrdup_printf(
 					"select start, end from %s where "
-					"(start < %d && (end >= %d "
-					"|| end = 0)) && id=%s "
+					"(start < %d and (end >= %d "
+					"and end = 0)) && id=%s "
 					"order by start",
 					suspend_table,
 					job_cond->usage_end, 
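The operator substitutions in this file are a portability fix: MySQL accepts C-style && and || as boolean operators in SQL, but PostgreSQL parses || as string concatenation (and has no boolean &&), so the generated conditions must use the standard and/or spellings. For a user filter with two hypothetical uids, the difference in the generated clause is:

	/* accepted by MySQL, misparsed by PostgreSQL (|| concatenates strings) */
	where (t1.uid='1001' || t1.uid='1002')

	/* standard SQL, accepted by both */
	where (t1.uid='1001' or t1.uid='1002')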
diff --git a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
index b003a7a37..38916d9b2 100644
--- a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
+++ b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
@@ -112,9 +112,8 @@ extern int init ( void )
 			      plugin_name);
 		xfree(cluster_name);
 		slurmdbd_auth_info = slurm_get_accounting_storage_pass();
-		if(!slurmdbd_auth_info)			
-			verbose("%s loaded AuthInfo=%s",
-				plugin_name, slurmdbd_auth_info);
+		verbose("%s loaded with AuthInfo=%s",
+			plugin_name, slurmdbd_auth_info);
 		first = 0;
 	} else {
 		debug4("%s loaded", plugin_name);
@@ -130,7 +129,8 @@ extern int fini ( void )
 	return SLURM_SUCCESS;
 }
 
-extern void *acct_storage_p_get_connection(bool make_agent, bool rollback)
+extern void *acct_storage_p_get_connection(bool make_agent, int conn_num,
+					   bool rollback)
 {
 	if(!slurmdbd_auth_info)	
 		init();
@@ -157,7 +157,8 @@ extern int acct_storage_p_commit(void *db_conn, bool commit)
 
 	req.msg_type = DBD_FINI;
 	req.data = &get_msg;
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
 
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
@@ -175,7 +176,8 @@ extern int acct_storage_p_add_users(void *db_conn, uint32_t uid, List user_list)
 
 	req.msg_type = DBD_ADD_USERS;
 	req.data = &get_msg;
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION, 
+					     &req, &resp_code);
 
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
@@ -195,7 +197,8 @@ extern int acct_storage_p_add_coord(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_ADD_ACCOUNT_COORDS;
 	req.data = &get_msg;
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION, 
+					     &req, &resp_code);
 
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
@@ -213,7 +216,8 @@ extern int acct_storage_p_add_accts(void *db_conn, uint32_t uid, List acct_list)
 
 	req.msg_type = DBD_ADD_ACCOUNTS;
 	req.data = &get_msg;
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
 
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
@@ -233,7 +237,8 @@ extern int acct_storage_p_add_clusters(void *db_conn, uint32_t uid,
 	req.msg_type = DBD_ADD_CLUSTERS;
 	req.data = &get_msg;
 
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
 
 	if(resp_code != SLURM_SUCCESS) {
 		rc = resp_code;
@@ -252,7 +257,8 @@ extern int acct_storage_p_add_associations(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_ADD_ASSOCS;
 	req.data = &get_msg;
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
 
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
@@ -271,7 +277,8 @@ extern int acct_storage_p_add_qos(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_ADD_QOS;
 	req.data = &get_msg;
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
 	
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
@@ -294,7 +301,7 @@ extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_MODIFY_USERS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_MODIFY_USERS failure: %m");
@@ -305,7 +312,7 @@ extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -313,7 +320,7 @@ extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -334,7 +341,7 @@ extern List acct_storage_p_modify_accounts(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_MODIFY_ACCOUNTS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_MODIFY_ACCOUNTS failure: %m");
@@ -345,7 +352,7 @@ extern List acct_storage_p_modify_accounts(void *db_conn, uint32_t uid,
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -353,7 +360,7 @@ extern List acct_storage_p_modify_accounts(void *db_conn, uint32_t uid,
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -376,7 +383,7 @@ extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 	req.msg_type = DBD_MODIFY_CLUSTERS;
 	req.data = &get_msg;
 
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_MODIFY_CLUSTERS failure: %m");
@@ -387,7 +394,7 @@ extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -395,7 +402,7 @@ extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -419,7 +426,7 @@ extern List acct_storage_p_modify_associations(
 
 	req.msg_type = DBD_MODIFY_ASSOCS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_MODIFY_ASSOCS failure: %m");
@@ -430,7 +437,7 @@ extern List acct_storage_p_modify_associations(
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -438,7 +445,47 @@ extern List acct_storage_p_modify_associations(
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
+	}
+
+	return ret_list;
+}
+
+extern List acct_storage_p_modify_qos(void *db_conn, uint32_t uid,
+				      acct_qos_cond_t *qos_cond,
+				      acct_qos_rec_t *qos)
+{
+	slurmdbd_msg_t req, resp;
+	dbd_modify_msg_t get_msg;
+	dbd_list_msg_t *got_msg;
+	List ret_list = NULL;
+	int rc;
+
+	get_msg.cond = qos_cond;
+	get_msg.rec = qos;
+
+	req.msg_type = DBD_MODIFY_QOS;
+	req.data = &get_msg;
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
+
+	if (rc != SLURM_SUCCESS)
+		error("slurmdbd: DBD_MODIFY_QOS failure: %m");
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_LIST) {
+		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
+		      resp.msg_type);
+	} else {
+		got_msg = (dbd_list_msg_t *) resp.data;
+		ret_list = got_msg->my_list;
+		got_msg->my_list = NULL;
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -459,7 +506,7 @@ extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_REMOVE_USERS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_USERS failure: %m");
@@ -470,7 +517,7 @@ extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -478,7 +525,7 @@ extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -501,7 +548,7 @@ extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_REMOVE_ACCOUNT_COORDS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_ACCOUNT_COORDS failure: %m");
@@ -512,7 +559,7 @@ extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -520,7 +567,7 @@ extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -541,7 +588,7 @@ extern List acct_storage_p_remove_accts(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_REMOVE_ACCOUNTS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_ACCTS failure: %m");
@@ -552,7 +599,7 @@ extern List acct_storage_p_remove_accts(void *db_conn, uint32_t uid,
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -560,7 +607,7 @@ extern List acct_storage_p_remove_accts(void *db_conn, uint32_t uid,
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -581,7 +628,7 @@ extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 
 	req.msg_type = DBD_REMOVE_CLUSTERS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_CLUSTERS failure: %m");
@@ -592,7 +639,7 @@ extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -600,7 +647,7 @@ extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -622,7 +669,7 @@ extern List acct_storage_p_remove_associations(
 
 	req.msg_type = DBD_REMOVE_ASSOCS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_ASSOCS failure: %m");
@@ -633,7 +680,7 @@ extern List acct_storage_p_remove_associations(
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -641,7 +688,7 @@ extern List acct_storage_p_remove_associations(
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -663,7 +710,7 @@ extern List acct_storage_p_remove_qos(
 
 	req.msg_type = DBD_REMOVE_QOS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_REMOVE_QOS failure: %m");
@@ -674,7 +721,7 @@ extern List acct_storage_p_remove_qos(
 			ret_list = list_create(NULL);
 		} else
 			error("%s", msg->comment);
-		slurmdbd_free_rc_msg(msg);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
 	} else if (resp.msg_type != DBD_GOT_LIST) {
 		error("slurmdbd: response type not DBD_GOT_LIST: %u", 
 		      resp.msg_type);
@@ -682,7 +729,7 @@ extern List acct_storage_p_remove_qos(
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -701,18 +748,26 @@ extern List acct_storage_p_get_users(void *db_conn, uid_t uid,
 	
 	req.msg_type = DBD_GET_USERS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_USERS failure: %m");
-	else if (resp.msg_type != DBD_GOT_USERS) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_USERS) {
 		error("slurmdbd: response type not DBD_GOT_USERS: %u", 
 		      resp.msg_type);
 	} else {
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -731,18 +786,26 @@ extern List acct_storage_p_get_accts(void *db_conn, uid_t uid,
 	
 	req.msg_type = DBD_GET_ACCOUNTS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_ACCOUNTS failure: %m");
-	else if (resp.msg_type != DBD_GOT_ACCOUNTS) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_ACCOUNTS) {
 		error("slurmdbd: response type not DBD_GOT_ACCOUNTS: %u", 
 		      resp.msg_type);
 	} else {
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 
@@ -762,18 +825,26 @@ extern List acct_storage_p_get_clusters(void *db_conn, uid_t uid,
 	
 	req.msg_type = DBD_GET_CLUSTERS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_CLUSTERS failure: %m");
-	else if (resp.msg_type != DBD_GOT_CLUSTERS) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_CLUSTERS) {
 		error("slurmdbd: response type not DBD_GOT_CLUSTERS: %u", 
 		      resp.msg_type);
 	} else {
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 
@@ -793,18 +864,26 @@ extern List acct_storage_p_get_associations(void *db_conn, uid_t uid,
 	
 	req.msg_type = DBD_GET_ASSOCS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_ASSOCS failure: %m");
-	else if (resp.msg_type != DBD_GOT_ASSOCS) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_ASSOCS) {
 		error("slurmdbd: response type not DBD_GOT_ASSOCS: %u", 
 		      resp.msg_type);
 	} else {
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -823,11 +902,19 @@ extern List acct_storage_p_get_qos(void *db_conn, uid_t uid,
 
 	req.msg_type = DBD_GET_QOS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_QOS failure: %m");
-	else if (resp.msg_type != DBD_GOT_QOS) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_QOS) {
 		error("slurmdbd: response type not DBD_GOT_QOS: %u", 
 		      resp.msg_type);
 	} else {
@@ -841,7 +928,7 @@ extern List acct_storage_p_get_qos(void *db_conn, uid_t uid,
 		else 
 			ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -860,18 +947,26 @@ extern List acct_storage_p_get_txn(void *db_conn, uid_t uid,
 
 	req.msg_type = DBD_GET_TXN;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_TXN failure: %m");
-	else if (resp.msg_type != DBD_GOT_TXN) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_TXN) {
 		error("slurmdbd: response type not DBD_GOT_TXN: %u", 
 		      resp.msg_type);
 	} else {
 		got_msg = (dbd_list_msg_t *) resp.data;
 		ret_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return ret_list;
@@ -893,11 +988,19 @@ extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
 	req.msg_type = DBD_GET_ASSOC_USAGE;
 	
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_ASSOC_USAGE failure: %m");
-	else if (resp.msg_type != DBD_GOT_ASSOC_USAGE) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			acct_assoc->accounting_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_ASSOC_USAGE) {
 		error("slurmdbd: response type not DBD_GOT_ASSOC_USAGE: %u", 
 		      resp.msg_type);
 	} else {
@@ -905,7 +1008,8 @@ extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
 		got_rec = (acct_association_rec_t *)got_msg->rec;
 		acct_assoc->accounting_list = got_rec->accounting_list;
 		got_rec->accounting_list = NULL;
-		slurmdbd_free_usage_msg(resp.msg_type, got_msg);
+		slurmdbd_free_usage_msg(SLURMDBD_VERSION,
+					resp.msg_type, got_msg);
 	}
 
 
@@ -925,7 +1029,8 @@ extern int acct_storage_p_roll_usage(void *db_conn,
 
 	req.data = &get_msg;
 
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
 
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
@@ -962,7 +1067,7 @@ extern int clusteracct_storage_p_node_down(void *db_conn,
 	msg.msg_type   = DBD_NODE_STATE;
 	msg.data       = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -983,7 +1088,7 @@ extern int clusteracct_storage_p_node_up(void *db_conn,
 	msg.msg_type   = DBD_NODE_STATE;
 	msg.data       = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -1004,7 +1109,7 @@ extern int clusteracct_storage_p_cluster_procs(void *db_conn,
 	msg.msg_type     = DBD_CLUSTER_PROCS;
 	msg.data         = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -1022,7 +1127,7 @@ extern int clusteracct_storage_p_register_ctld(char *cluster,
 	msg.msg_type     = DBD_REGISTER_CTLD;
 	msg.data         = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -1046,11 +1151,19 @@ extern int clusteracct_storage_p_get_usage(
 	req.msg_type = DBD_GET_CLUSTER_USAGE;
 	
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_CLUSTER_USAGE failure: %m");
-	else if (resp.msg_type != DBD_GOT_CLUSTER_USAGE) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			cluster_rec->accounting_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_CLUSTER_USAGE) {
 		error("slurmdbd: response type not DBD_GOT_CLUSTER_USAGE: %u", 
 		      resp.msg_type);
 	} else {
@@ -1058,7 +1171,8 @@ extern int clusteracct_storage_p_get_usage(
 		got_rec = (acct_cluster_rec_t *)got_msg->rec;
 		cluster_rec->accounting_list = got_rec->accounting_list;
 		got_rec->accounting_list = NULL;
-		slurmdbd_free_usage_msg(resp.msg_type, got_msg);
+		slurmdbd_free_usage_msg(SLURMDBD_VERSION,
+					resp.msg_type, got_msg);
 	}
 
 
@@ -1068,7 +1182,7 @@ extern int clusteracct_storage_p_get_usage(
 /* 
  * load into the storage the start of a job
  */
-extern int jobacct_storage_p_job_start(void *db_conn,
+extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 				       struct job_record *job_ptr)
 {
 	slurmdbd_msg_t msg, msg_rc;
@@ -1084,6 +1198,7 @@ extern int jobacct_storage_p_job_start(void *db_conn,
 	}
 
 	req.alloc_cpus    = job_ptr->total_procs;
+	req.cluster       = cluster_name;
 	req.account       = job_ptr->account;
 	req.assoc_id      = job_ptr->assoc_id;
 #ifdef HAVE_BG
@@ -1115,7 +1230,7 @@ extern int jobacct_storage_p_job_start(void *db_conn,
 	 * again just send the message 
 	 */
 	if(req.db_index) {
-		if (slurm_send_slurmdbd_msg(&msg) < 0) {
+		if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0) {
 			xfree(block_id);
 			return SLURM_ERROR;
 		}
@@ -1126,9 +1241,9 @@ extern int jobacct_storage_p_job_start(void *db_conn,
 	/* If we don't have the db_index we need to wait for it to be
 	 * used in the other submissions for this job.
 	 */
-	rc = slurm_send_recv_slurmdbd_msg(&msg, &msg_rc);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &msg, &msg_rc);
 	if (rc != SLURM_SUCCESS) {
-		if (slurm_send_slurmdbd_msg(&msg) < 0) {
+		if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0) {
 			xfree(block_id);
 			return SLURM_ERROR;
 		}
@@ -1138,7 +1253,7 @@ extern int jobacct_storage_p_job_start(void *db_conn,
 	} else {
 		resp = (dbd_job_start_rc_msg_t *) msg_rc.data;
 		job_ptr->db_index = resp->db_index;
-		slurmdbd_free_job_start_rc_msg(resp);
+		slurmdbd_free_job_start_rc_msg(SLURMDBD_VERSION, resp);
 	}
 	xfree(block_id);
 	
@@ -1175,7 +1290,7 @@ extern int jobacct_storage_p_job_complete(void *db_conn,
 	msg.msg_type    = DBD_JOB_COMPLETE;
 	msg.data        = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -1242,7 +1357,7 @@ extern int jobacct_storage_p_step_start(void *db_conn,
 	msg.msg_type    = DBD_STEP_START;
 	msg.data        = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -1278,7 +1393,8 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 #else
 	if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) {
 		cpus = step_ptr->job_ptr->total_procs;
-		snprintf(node_list, BUFFER_SIZE, "%s", step_ptr->job_ptr->nodes);
+		snprintf(node_list, BUFFER_SIZE, "%s", 
+			 step_ptr->job_ptr->nodes);
 	} else {
 		cpus = step_ptr->step_layout->task_cnt;
 		snprintf(node_list, BUFFER_SIZE, "%s", 
@@ -1310,7 +1426,7 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 	msg.msg_type    = DBD_STEP_COMPLETE;
 	msg.data        = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -1335,7 +1451,7 @@ extern int jobacct_storage_p_suspend(void *db_conn,
 	msg.msg_type     = DBD_JOB_SUSPEND;
 	msg.data         = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -1374,19 +1490,27 @@ extern List jobacct_storage_p_get_jobs(void *db_conn, uid_t uid,
 
 	req.msg_type = DBD_GET_JOBS;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 	xfree(get_msg.user);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_JOBS failure: %m");
-	else if (resp.msg_type != DBD_GOT_JOBS) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			job_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_JOBS) {
 		error("slurmdbd: response type not DBD_GOT_JOBS: %u", 
 		      resp.msg_type);
 	} else {
 		got_msg = (dbd_list_msg_t *) resp.data;
 		job_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return job_list;
@@ -1410,18 +1534,26 @@ extern List jobacct_storage_p_get_jobs_cond(void *db_conn, uid_t uid,
 
 	req.msg_type = DBD_GET_JOBS_COND;
 	req.data = &get_msg;
-	rc = slurm_send_recv_slurmdbd_msg(&req, &resp);
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
 
 	if (rc != SLURM_SUCCESS)
 		error("slurmdbd: DBD_GET_JOBS_COND failure: %m");
-	else if (resp.msg_type != DBD_GOT_JOBS) {
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			job_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_JOBS) {
 		error("slurmdbd: response type not DBD_GOT_JOBS: %u", 
 		      resp.msg_type);
 	} else {
 		got_msg = (dbd_list_msg_t *) resp.data;
 		job_list = got_msg->my_list;
 		got_msg->my_list = NULL;
-		slurmdbd_free_list_msg(got_msg);
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
 	}
 
 	return job_list;
@@ -1449,7 +1581,8 @@ extern int acct_storage_p_update_shares_used(void *db_conn,
 
 	req.msg_type = DBD_UPDATE_SHARES_USED;
 	req.data = &shares_used_msg;
-	rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
 
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
@@ -1471,7 +1604,7 @@ extern int acct_storage_p_flush_jobs_on_cluster(void *db_conn, char *cluster,
 	msg.msg_type     = DBD_FLUSH_JOBS;
 	msg.data         = &req;
 
-	if (slurm_send_slurmdbd_msg(&msg) < 0)
+	if (slurm_send_slurmdbd_msg(SLURMDBD_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
diff --git a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
index 77b558162..6180f24d1 100644
--- a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
+++ b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
@@ -56,6 +56,8 @@
 #include "src/common/uid.h"
 #include "filetxt_jobcomp_process.h"
 
+#define USE_ISO8601 1
+
 /*
  * These variables are required by the generic plugin interface.  If they
  * are not found in the plugin, the plugin loader will ignore it.
@@ -208,6 +210,36 @@ extern int slurm_jobcomp_set_location ( char * location )
 	return rc;
 }
 
+/* This is a variation of slurm_make_time_str() in src/common/parse_time.h
+ * This version uses ISO8601 format by default. */
+static void _make_time_str (time_t *time, char *string, int size)
+{
+	struct tm time_tm;
+
+	localtime_r(time, &time_tm);
+	if ( *time == (time_t) 0 ) {
+		snprintf(string, size, "Unknown");
+	} else {
+#if USE_ISO8601
+		/* Format YYYY-MM-DDTHH:MM:SS, ISO8601 standard format,
+		 * NOTE: This is expected to break Maui, Moab and LSF
+		 * schedulers' management of SLURM. */
+		snprintf(string, size,
+			"%4.4u-%2.2u-%2.2uT%2.2u:%2.2u:%2.2u",
+			(time_tm.tm_year + 1900), (time_tm.tm_mon+1), 
+			time_tm.tm_mday, time_tm.tm_hour, time_tm.tm_min, 
+			time_tm.tm_sec);
+#else
+		/* Format MM/DD-HH:MM:SS */
+		snprintf(string, size,
+			"%2.2u/%2.2u-%2.2u:%2.2u:%2.2u",
+			(time_tm.tm_mon+1), time_tm.tm_mday,
+			time_tm.tm_hour, time_tm.tm_min, time_tm.tm_sec);
+
+#endif
+	}
+}
+
 extern int slurm_jobcomp_log_record ( struct job_record *job_ptr )
 {
 	int rc = SLURM_SUCCESS;
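
The _make_time_str() helper added above renders a time_t in ISO 8601 form (YYYY-MM-DDTHH:MM:SS).
A minimal stand-alone equivalent, assuming only POSIX localtime_r(); strftime() with
"%Y-%m-%dT%H:%M:%S" would work just as well:

#include <stdio.h>
#include <time.h>

/* Format a time_t as YYYY-MM-DDTHH:MM:SS in local time. */
static void iso8601_str(time_t t, char *buf, size_t size)
{
	struct tm tm;

	if (t == (time_t) 0) {
		snprintf(buf, size, "Unknown");
		return;
	}
	localtime_r(&t, &tm);
	snprintf(buf, size, "%4.4u-%2.2u-%2.2uT%2.2u:%2.2u:%2.2u",
		 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		 tm.tm_hour, tm.tm_min, tm.tm_sec);
}

int main(void)
{
	char buf[32];

	iso8601_str(time(NULL), buf, sizeof(buf));
	printf("%s\n", buf);	/* e.g. 2008-10-09T19:56:24 */
	return 0;
}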
@@ -236,9 +268,8 @@ extern int slurm_jobcomp_log_record ( struct job_record *job_ptr )
 	 * JOB_FAILED, JOB_TIMEOUT, etc. */
 	job_state = job_ptr->job_state & (~JOB_COMPLETING);
 
-	slurm_make_time_str(&(job_ptr->start_time),
-			    start_str, sizeof(start_str));
-	slurm_make_time_str(&(job_ptr->end_time), end_str, sizeof(end_str));
+	_make_time_str(&(job_ptr->start_time), start_str, sizeof(start_str));
+	_make_time_str(&(job_ptr->end_time), end_str, sizeof(end_str));
 
 	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
 		select_buf, sizeof(select_buf), SELECT_PRINT_MIXED);
diff --git a/src/plugins/jobcomp/mysql/jobcomp_mysql.c b/src/plugins/jobcomp/mysql/jobcomp_mysql.c
index 98fa01f33..8911392d5 100644
--- a/src/plugins/jobcomp/mysql/jobcomp_mysql.c
+++ b/src/plugins/jobcomp/mysql/jobcomp_mysql.c
@@ -99,7 +99,6 @@ storage_field_t jobcomp_table_fields[] = {
 	{ "nodelist", "text" }, 
 	{ "nodecnt", "mediumint unsigned not null" },
 	{ "proc_cnt", "mediumint unsigned not null" },
-#ifdef HAVE_BG
 	{ "connect_type", "tinytext" },
 	{ "reboot", "tinytext" },
 	{ "rotate", "tinytext" },
@@ -107,7 +106,6 @@ storage_field_t jobcomp_table_fields[] = {
 	{ "geometry", "tinytext" },
 	{ "start", "tinytext" },
 	{ "blockid", "tinytext" },
-#endif
 	{ NULL, NULL}
 };
 
@@ -300,17 +298,11 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 #ifdef HAVE_MYSQL
 	int rc = SLURM_SUCCESS;
 	char *usr_str = NULL, *grp_str = NULL, lim_str[32];
-#ifdef HAVE_BG
-	char connect_type[128];
-	char reboot[4];
-	char rotate[4];
-	char maxprocs[20];
-	char geometry[20];
-	char start[20];
-	char blockid[128];
-#endif
+	char *connect_type = NULL, *reboot = NULL, *rotate = NULL,
+		*maxprocs = NULL, *geometry = NULL, *start = NULL,
+		*blockid = NULL;
 	enum job_states job_state;
-	char query[1024];
+	char *query = NULL;
 
 	if(!jobcomp_mysql_db || mysql_ping(jobcomp_mysql_db) != 0) {
 		char *loc = slurm_get_jobcomp_loc();
@@ -334,47 +326,84 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 	 * JOB_FAILED, JOB_TIMEOUT, etc. */
 	job_state = job_ptr->job_state & (~JOB_COMPLETING);
 
-#ifdef HAVE_BG
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		connect_type, sizeof(connect_type), SELECT_PRINT_CONNECTION);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		reboot, sizeof(reboot), SELECT_PRINT_REBOOT);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		rotate, sizeof(rotate), SELECT_PRINT_ROTATE);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		maxprocs, sizeof(maxprocs), SELECT_PRINT_MAX_PROCS);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		geometry, sizeof(geometry), SELECT_PRINT_GEOMETRY);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		start, sizeof(start), SELECT_PRINT_START);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		blockid, sizeof(blockid), SELECT_PRINT_BG_ID);
-#endif
-	snprintf(query, sizeof(query),
-		 "insert into %s (jobid, uid, user_name, gid, group_name, "
-		 "name, state, proc_cnt, "
-		 "partition, timelimit, starttime, endtime, nodelist, nodecnt"
-#ifdef HAVE_BG
-		 ", connect_type, reboot, rotate, maxprocs, geometry, "
-		 "start, blockid"
-#endif
-		 ") values (%u, %u, '%s', %u, '%s', '%s', %d, %u, "
-		 "'%s', '%s', %u, %u, '%s', %u"
-#ifdef HAVE_BG
-		 ", '%s', '%s', '%s', %s, '%s', '%s', '%s'"
-#endif
-		 ")",
-		 jobcomp_table, job_ptr->job_id, job_ptr->user_id, usr_str,
-		 job_ptr->group_id, grp_str, job_ptr->name,
-		 job_state, job_ptr->total_procs, job_ptr->partition, lim_str,
-		 (int)job_ptr->start_time, (int)job_ptr->end_time,
-		 job_ptr->nodes, job_ptr->node_cnt
-#ifdef HAVE_BG
-		 , connect_type, reboot, rotate, maxprocs, geometry,
-		 start, blockid
-#endif
-		);
-//	info("query = %s", query);
+	connect_type = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+						SELECT_PRINT_CONNECTION);
+	reboot = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					  SELECT_PRINT_REBOOT);
+	rotate = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					  SELECT_PRINT_ROTATE);
+	maxprocs = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					    SELECT_PRINT_MAX_PROCS);
+	geometry = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					    SELECT_PRINT_GEOMETRY);
+	start = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					 SELECT_PRINT_START);
+	blockid = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					   SELECT_PRINT_BG_ID);
+
+	query = xstrdup_printf(
+		"insert into %s (jobid, uid, user_name, gid, group_name, "
+		"name, state, proc_cnt, partition, timelimit, "
+		"starttime, endtime, nodecnt",
+		jobcomp_table);
+
+	if(job_ptr->nodes)
+		xstrcat(query, ", nodelist");		
+	if(connect_type)
+		xstrcat(query, ", connect_type");
+	if(reboot)
+		xstrcat(query, ", reboot");
+	if(rotate)
+		xstrcat(query, ", rotate");
+	if(maxprocs)
+		xstrcat(query, ", maxprocs");
+	if(geometry)
+		xstrcat(query, ", geometry");
+	if(start)
+		xstrcat(query, ", start");
+	if(blockid)
+		xstrcat(query, ", blockid");
+	xstrfmtcat(query, ") values (%u, %u, '%s', %u, '%s', \"%s\", %d, %u, "
+		   "'%s', \"%s\", %u, %u, %u",
+		   job_ptr->job_id, job_ptr->user_id, usr_str,
+		   job_ptr->group_id, grp_str, job_ptr->name,
+		   job_state, job_ptr->total_procs, job_ptr->partition, lim_str,
+		   (int)job_ptr->start_time, (int)job_ptr->end_time,
+		   job_ptr->node_cnt);
+	
+	if(job_ptr->nodes)
+		xstrfmtcat(query, ", '%s'", job_ptr->nodes);		
+
+	if(connect_type) {
+		xstrfmtcat(query, ", '%s'", connect_type);
+		xfree(connect_type);
+	}
+	if(reboot) {
+		xstrfmtcat(query, ", '%s'", reboot);
+		xfree(reboot);
+	}
+	if(rotate) {
+		xstrfmtcat(query, ", '%s'", rotate);
+		xfree(rotate);
+	}
+	if(maxprocs) {
+		xstrfmtcat(query, ", '%s'", maxprocs);
+		xfree(maxprocs);
+	}
+	if(geometry) {
+		xstrfmtcat(query, ", '%s'", geometry);
+		xfree(geometry);
+	}
+	if(start) {
+		xstrfmtcat(query, ", '%s'", start);
+		xfree(start);
+	}
+	if(blockid) {
+		xstrfmtcat(query, ", '%s'", blockid);
+		xfree(blockid);
+	}
+	xstrcat(query, ")");
+	//info("query = %s", query);
 	rc = mysql_db_query(jobcomp_mysql_db, query);
 	xfree(usr_str);
 	xfree(grp_str);
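
The query above is now assembled piece by piece with SLURM's xstrdup_printf()/xstrcat()/xstrfmtcat()
string builders, adding a column name and, later, its value only when the corresponding field was
returned, so absent BlueGene fields simply drop out of the INSERT. The same lockstep column/value
idea with plain snprintf() into a fixed buffer, purely as an illustration (the table and field
values are made up):

#include <stdio.h>

int main(void)
{
	char query[512];
	const char *nodelist = "bg[000x133]";	/* optional field, present */
	const char *blockid  = NULL;		/* optional field, absent  */
	size_t len;

	/* Column list: append a column only when its value exists... */
	len = snprintf(query, sizeof(query),
		       "insert into jobcomp_table (jobid, state, nodecnt");
	if (nodelist)
		len += snprintf(query + len, sizeof(query) - len, ", nodelist");
	if (blockid)
		len += snprintf(query + len, sizeof(query) - len, ", blockid");

	/* ...and append values under the same conditions, in the same order. */
	len += snprintf(query + len, sizeof(query) - len,
			") values (%u, %d, %u", 1234u, 3, 16u);
	if (nodelist)
		len += snprintf(query + len, sizeof(query) - len,
				", '%s'", nodelist);
	if (blockid)
		len += snprintf(query + len, sizeof(query) - len,
				", '%s'", blockid);
	snprintf(query + len, sizeof(query) - len, ")");

	puts(query);	/* the text that would be handed to mysql_db_query() */
	return 0;
}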
diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
index e6c51d811..9e29ee2c1 100644
--- a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
+++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
@@ -180,7 +180,6 @@ extern List mysql_jobcomp_process_get_jobs(List selected_steps,
 			job->state = xstrdup(job_state_string(i));
 		}
 		job->timelimit = xstrdup(row[JOBCOMP_REQ_TIMELIMIT]);
-#ifdef HAVE_BG
 		if(row[JOBCOMP_REQ_MAXPROCS])
 			job->max_procs = atoi(row[JOBCOMP_REQ_MAXPROCS]);
 		job->connection = xstrdup(row[JOBCOMP_REQ_CONNECTION]);
@@ -189,7 +188,6 @@ extern List mysql_jobcomp_process_get_jobs(List selected_steps,
 		job->geo = xstrdup(row[JOBCOMP_REQ_GEOMETRY]);
 		job->bg_start_point = xstrdup(row[JOBCOMP_REQ_START]);
 		job->blockid = xstrdup(row[JOBCOMP_REQ_BLOCKID]);
-#endif
 		list_append(job_list, job);
 	}
 		
diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
index 787b098c4..ed42d4b71 100644
--- a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
+++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
@@ -71,7 +71,6 @@ enum {
 	JOBCOMP_REQ_ENDTIME,
 	JOBCOMP_REQ_NODELIST,
 	JOBCOMP_REQ_NODECNT,
-#ifdef HAVE_BG
 	JOBCOMP_REQ_CONNECTION,
 	JOBCOMP_REQ_REBOOT,
 	JOBCOMP_REQ_ROTATE,
@@ -79,7 +78,6 @@ enum {
 	JOBCOMP_REQ_GEOMETRY,
 	JOBCOMP_REQ_START,
 	JOBCOMP_REQ_BLOCKID,
-#endif
 	JOBCOMP_REQ_COUNT		
 };
 
diff --git a/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c b/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
index a4a3438f5..6f53bc0df 100644
--- a/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
+++ b/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
@@ -99,7 +99,6 @@ storage_field_t jobcomp_table_fields[] = {
 	{ "nodelist", "text" }, 
 	{ "nodecnt", "integer not null" },
 	{ "proc_cnt", "integer not null" },
-#ifdef HAVE_BG
 	{ "connect_type", "text" },
 	{ "reboot", "text" },
 	{ "rotate", "text" },
@@ -107,7 +106,6 @@ storage_field_t jobcomp_table_fields[] = {
 	{ "geometry", "text" },
 	{ "start", "text" },
 	{ "blockid", "text" },
-#endif
 	{ NULL, NULL}
 };
 
@@ -323,17 +321,11 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 #ifdef HAVE_PGSQL
 	int rc = SLURM_SUCCESS;
 	char *usr_str = NULL, *grp_str = NULL, lim_str[32];
-#ifdef HAVE_BG
-	char connect_type[128];
-	char reboot[4];
-	char rotate[4];
-	char maxprocs[20];
-	char geometry[20];
-	char start[20];
-	char blockid[128];
-#endif
+	char *connect_type = NULL, *reboot = NULL, *rotate = NULL,
+		*maxprocs = NULL, *geometry = NULL, *start = NULL,
+		*blockid = NULL;
 	enum job_states job_state;
-	char query[1024];
+	char *query = NULL;
 
 	if(!jobcomp_pgsql_db || PQstatus(jobcomp_pgsql_db) != CONNECTION_OK) {
 		char *loc = slurm_get_jobcomp_loc();
@@ -356,47 +348,85 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 	 * We remove this flag to get the eventual completion state:
 	 * JOB_FAILED, JOB_TIMEOUT, etc. */
 	job_state = job_ptr->job_state & (~JOB_COMPLETING);
+	
+	connect_type = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+						SELECT_PRINT_CONNECTION);
+	reboot = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					  SELECT_PRINT_REBOOT);
+	rotate = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					  SELECT_PRINT_ROTATE);
+	maxprocs = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					    SELECT_PRINT_MAX_PROCS);
+	geometry = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					    SELECT_PRINT_GEOMETRY);
+	start = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					 SELECT_PRINT_START);
+	blockid = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					   SELECT_PRINT_BG_ID);
+
+	query = xstrdup_printf(
+		"insert into %s (jobid, uid, user_name, gid, group_name, "
+		"name, state, proc_cnt, partition, timelimit, "
+		"starttime, endtime, nodecnt",
+		jobcomp_table);
+
+	if(job_ptr->nodes)
+		xstrcat(query, ", nodelist");		
+	if(connect_type)
+		xstrcat(query, ", connect_type");
+	if(reboot)
+		xstrcat(query, ", reboot");
+	if(rotate)
+		xstrcat(query, ", rotate");
+	if(maxprocs)
+		xstrcat(query, ", maxprocs");
+	if(geometry)
+		xstrcat(query, ", geometry");
+	if(start)
+		xstrcat(query, ", start");
+	if(blockid)
+		xstrcat(query, ", blockid");
+
+	xstrfmtcat(query, ") values (%u, %u, '%s', %u, '%s', \"%s\", %d, %u, "
+		   "'%s', \"%s\", %u, %u,  %u",
+		   job_ptr->job_id, job_ptr->user_id, usr_str,
+		   job_ptr->group_id, grp_str, job_ptr->name,
+		   job_state, job_ptr->total_procs, job_ptr->partition, lim_str,
+		   (int)job_ptr->start_time, (int)job_ptr->end_time,
+		   job_ptr->node_cnt);
+	
+	if(job_ptr->nodes)
+		xstrfmtcat(query, ", '%s'", job_ptr->nodes);		
 
-#ifdef HAVE_BG
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		connect_type, sizeof(connect_type), SELECT_PRINT_CONNECTION);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		reboot, sizeof(reboot), SELECT_PRINT_REBOOT);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		rotate, sizeof(rotate), SELECT_PRINT_ROTATE);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		maxprocs, sizeof(maxprocs), SELECT_PRINT_MAX_PROCS);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		geometry, sizeof(geometry), SELECT_PRINT_GEOMETRY);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		start, sizeof(start), SELECT_PRINT_START);
-	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
-		blockid, sizeof(blockid), SELECT_PRINT_BG_ID);
-#endif
-	snprintf(query, sizeof(query),
-		 "insert into %s (jobid, uid, user_name, gid, group_name, "
-		 "name, state, proc_cnt, "
-		 "partition, timelimit, starttime, endtime, nodelist, nodecnt"
-#ifdef HAVE_BG
-		 ", connect_type, reboot, rotate, maxprocs, geometry, "
-		 "start, blockid"
-#endif
-		 ") values (%u, %u, '%s', %u, '%s', '%s', %d, %u, "
-		 "'%s', '%s', %u, %u, '%s', %u"
-#ifdef HAVE_BG
-		 ", '%s', '%s', '%s', %s, '%s', '%s', '%s'"
-#endif
-		 ")",
-		 jobcomp_table, job_ptr->job_id, job_ptr->user_id, usr_str,
-		 job_ptr->group_id, grp_str, job_ptr->name, job_state,
-		 job_ptr->total_procs, job_ptr->partition, lim_str,
-		 (int)job_ptr->start_time, (int)job_ptr->end_time,
-		 job_ptr->nodes, job_ptr->node_cnt
-#ifdef HAVE_BG
-		 , connect_type, reboot, rotate, maxprocs, geometry,
-		 start, blockid
-#endif
-		 );
+	if(connect_type) {
+		xstrfmtcat(query, ", '%s'", connect_type);
+		xfree(connect_type);
+	}
+	if(reboot) {
+		xstrfmtcat(query, ", '%s'", reboot);
+		xfree(reboot);
+	}
+	if(rotate) {
+		xstrfmtcat(query, ", '%s'", rotate);
+		xfree(rotate);
+	}
+	if(maxprocs) {
+		xstrfmtcat(query, ", '%s'", maxprocs);
+		xfree(maxprocs);
+	}
+	if(geometry) {
+		xstrfmtcat(query, ", '%s'", geometry);
+		xfree(geometry);
+	}
+	if(start) {
+		xstrfmtcat(query, ", '%s'", start);
+		xfree(start);
+	}
+	if(blockid) {
+		xstrfmtcat(query, ", '%s'", blockid);
+		xfree(blockid);
+	}
+	xstrcat(query, ")");
 	//info("here is the query %s", query);
 
 	rc = pgsql_db_query(jobcomp_pgsql_db, query);
diff --git a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
index 38e477527..755d1b8e5 100644
--- a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
+++ b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
@@ -188,7 +188,6 @@ extern List pgsql_jobcomp_process_get_jobs(List selected_steps,
 		}
 		job->timelimit =
 			xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_TIMELIMIT));
-#ifdef HAVE_BG
 		if(PQgetvalue(result, i, JOBCOMP_REQ_MAXPROCS))
 			job->max_procs =
 				atoi(PQgetvalue(result, i, 
@@ -205,9 +204,7 @@ extern List pgsql_jobcomp_process_get_jobs(List selected_steps,
 			xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_GEOMETRY));
 		job->bg_start_point =
 			xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_START));
-#endif
 		list_append(job_list, job);
-
 	}
 	
 	PQclear(result);
diff --git a/src/plugins/jobcomp/script/jobcomp_script.c b/src/plugins/jobcomp/script/jobcomp_script.c
index 8f6348a67..1faeca72c 100644
--- a/src/plugins/jobcomp/script/jobcomp_script.c
+++ b/src/plugins/jobcomp/script/jobcomp_script.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  jobcomp_script.c - Script running slurm job completion logging plugin.
- *  $Id: jobcomp_script.c 14500 2008-07-11 23:00:14Z jette $
+ *  $Id: jobcomp_script.c 15366 2008-10-09 19:56:24Z da $
  *****************************************************************************
  *  Produced at Center for High Performance Computing, North Dakota State
  *  University
@@ -64,6 +64,7 @@
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
+#include "src/common/node_select.h"
 #include "src/common/list.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -158,6 +159,15 @@ struct jobcomp_info {
 	char *partition;
 	char *jobstate;
 	char *account;
+#ifdef HAVE_BG
+	char *connect_type;
+	char *reboot;
+	char *rotate;
+	char *maxprocs;
+	char *geometry;
+	char *block_start;
+	char *blockid;
+#endif
 };
 
 static struct jobcomp_info * _jobcomp_info_create (struct job_record *job)
@@ -188,7 +198,22 @@ static struct jobcomp_info * _jobcomp_info_create (struct job_record *job)
 	j->nprocs = job->total_procs;
 	j->nnodes = job->node_cnt;
 	j->account = job->account ? xstrdup (job->account) : NULL;
-
+#ifdef HAVE_BG
+	j->connect_type = select_g_xstrdup_jobinfo(job->select_jobinfo,
+						   SELECT_PRINT_CONNECTION);
+	j->reboot = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					     SELECT_PRINT_REBOOT);
+	j->rotate = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					     SELECT_PRINT_ROTATE);
+	j->maxprocs = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					       SELECT_PRINT_MAX_PROCS);
+	j->geometry = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					       SELECT_PRINT_GEOMETRY);
+	j->block_start = select_g_xstrdup_jobinfo(job->select_jobinfo,
+						  SELECT_PRINT_START);
+	j->blockid = select_g_xstrdup_jobinfo(job->select_jobinfo,
+					      SELECT_PRINT_BG_ID);
+#endif
 	return (j);
 }
 
@@ -201,6 +226,15 @@ static void _jobcomp_info_destroy (struct jobcomp_info *j)
 	xfree (j->nodes);
 	xfree (j->jobstate);
 	xfree (j->account);
+#ifdef HAVE_BG
+	xfree (j->connect_type);
+	xfree (j->reboot);
+	xfree (j->rotate);
+	xfree (j->maxprocs);
+	xfree (j->geometry);
+	xfree (j->block_start);
+	xfree (j->blockid);
+#endif
 	xfree (j);
 }
 
@@ -305,6 +339,16 @@ static char ** _create_environment (struct jobcomp_info *job)
 	_env_append (&env, "JOBSTATE",  job->jobstate);
 	_env_append (&env, "PARTITION", job->partition);
 	
+#ifdef HAVE_BG
+	_env_append (&env, "CONNECT_TYPE", job->connect_type);
+	_env_append (&env, "REBOOT",       job->reboot);
+	_env_append (&env, "ROTATE",       job->rotate);
+	_env_append (&env, "MAXPROCS",     job->maxprocs);
+	_env_append (&env, "GEOMETRY",     job->geometry);
+	_env_append (&env, "BLOCK_START",  job->block_start);
+	_env_append (&env, "BLOCKID",      job->blockid);
+#endif
+
 	if (job->limit == INFINITE)
 		_env_append (&env, "LIMIT", "UNLIMITED");
 	else 
@@ -317,6 +361,7 @@ static char ** _create_environment (struct jobcomp_info *job)
 #else
 	_env_append (&env, "PATH", "/bin:/usr/bin");
 #endif
+
 	return (env);
 }
 
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index 16373f2e6..e512c0f55 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -90,9 +90,15 @@ static bool stop_backfill = false;
 static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /* Backfill scheduling has considerable overhead, 
- * so only attempt it every BACKFILL_INTERVAL seconds */
+ *	so only attempt it every BACKFILL_INTERVAL seconds.
+ * Much of the scheduling for BlueGene happens through backfill,
+ *	so we run it more frequently. */
 #ifndef BACKFILL_INTERVAL
-#  define BACKFILL_INTERVAL	10
+#  ifdef HAVE_BG
+#    define BACKFILL_INTERVAL	5
+#  else
+#    define BACKFILL_INTERVAL	10
+#  endif
 #endif
 
 /* Set __DEBUG to get detailed logging for this thread without 
@@ -211,13 +217,13 @@ static void _attempt_backfill(void)
 {
 	bool filter_root = false;
 	struct job_queue *job_queue = NULL;
-	int i, j,job_queue_size, node_space_recs = 0;
+	int i, j,job_queue_size, node_space_recs;
 	struct job_record *job_ptr;
 	struct part_record *part_ptr;
 	uint32_t end_time, end_reserve, time_limit;
 	uint32_t min_nodes, max_nodes, req_nodes;
 	uint16_t orig_shared;
-	bitstr_t *avail_bitmap = NULL;
+	bitstr_t *avail_bitmap = NULL, *tmp_bitmap;
 	time_t now = time(NULL);
 	node_space_map_t node_space[MAX_BACKFILL_JOB_CNT + 2];
 
@@ -232,9 +238,9 @@ static void _attempt_backfill(void)
 
 	node_space[0].begin_time = now;
 	node_space[0].end_time = now + BACKFILL_WINDOW;
-	node_space[0].avail_bitmap = bit_alloc(node_record_count);
-	bit_or(node_space[0].avail_bitmap, avail_node_bitmap);
+	node_space[0].avail_bitmap = bit_copy(avail_node_bitmap);
 	node_space[0].next = 0;
+	node_space_recs = 1;
 #if __DEBUG
 	_dump_node_space_table(node_space);
 #endif
@@ -242,6 +248,10 @@ static void _attempt_backfill(void)
 	for (i = 0; i < job_queue_size; i++) {
 		job_ptr = job_queue[i].job_ptr;
 		part_ptr = job_ptr->part_ptr;
+#if __DEBUG
+		info("backfill test for job %u", job_ptr->job_id);
+#endif
+
 		if (part_ptr == NULL) {
 			part_ptr = find_part_record(job_ptr->partition);
 			xassert(part_ptr);
@@ -281,10 +291,18 @@ static void _attempt_backfill(void)
 		}
 
 		/* Determine job's expected completion time */
-		if (job_ptr->time_limit == NO_VAL)
-			time_limit = part_ptr->max_time;
-		else
-			time_limit = job_ptr->time_limit;
+		if (job_ptr->time_limit == NO_VAL) {
+			if (part_ptr->max_time == INFINITE)
+				time_limit = 365 * 24 * 60; /* one year */
+			else
+				time_limit = part_ptr->max_time;
+		} else {
+			if (part_ptr->max_time == INFINITE)
+				time_limit = job_ptr->time_limit;
+			else
+				time_limit = MIN(job_ptr->time_limit,
+						 part_ptr->max_time);
+		}
 		end_time = (time_limit * 60) + now;
 
 		/* Identify usable nodes for this job */
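
The expanded block above computes the effective time limit: an INFINITE partition limit no longer
flows into the arithmetic, and a job's own limit is clamped to the partition maximum. The same
decision table as a small stand-alone function (the NO_VAL/INFINITE values below are illustrative
placeholders, not necessarily SLURM's real constants):

#include <stdio.h>
#include <stdint.h>

#define NO_VAL    0xfffffffe	/* job did not request a limit */
#define INFINITE  0xffffffff	/* partition allows unlimited time */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

/* Effective run-time limit in minutes, as used for the backfill window. */
static uint32_t effective_limit(uint32_t job_limit, uint32_t part_limit)
{
	if (job_limit == NO_VAL) {
		if (part_limit == INFINITE)
			return 365 * 24 * 60;	/* cap "forever" at one year */
		return part_limit;
	}
	if (part_limit == INFINITE)
		return job_limit;
	return MIN(job_limit, part_limit);
}

int main(void)
{
	printf("%u\n", effective_limit(NO_VAL, INFINITE));	/* 525600 */
	printf("%u\n", effective_limit(120, 60));		/* 60     */
	printf("%u\n", effective_limit(30, INFINITE));		/* 30     */
	return 0;
}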
@@ -292,10 +310,11 @@ static void _attempt_backfill(void)
 		avail_bitmap = bit_copy(part_ptr->node_bitmap);
 		bit_and(avail_bitmap, up_node_bitmap);
 		for (j=0; ; ) {
-			if (node_space[j].end_time <= end_time) {
+			if (node_space[j].begin_time <= end_time) {
 				bit_and(avail_bitmap, 
 					node_space[j].avail_bitmap);
-			}
+			} else
+				break;
 			if ((j = node_space[j].next) == 0)
 				break;
 		}
@@ -312,26 +331,31 @@ static void _attempt_backfill(void)
 				    avail_bitmap)))
 			continue;	/* required nodes missing */
 		if (bit_set_count(avail_bitmap) < min_nodes)
-			continue;	/* no nodes remain */
+			continue;	/* insufficient nodes remain */
 
 		/* Try to schedule the job. First on dedicated nodes
 		 * then on shared nodes (if so configured). */
 		orig_shared = job_ptr->details->shared;
 		job_ptr->details->shared = 0;
-		j = select_g_job_test(job_ptr, avail_bitmap,
-				min_nodes, max_nodes, req_nodes, 
-				SELECT_MODE_WILL_RUN);
+		tmp_bitmap = bit_copy(avail_bitmap);
+		j = select_g_job_test(job_ptr, avail_bitmap, min_nodes,
+				      max_nodes, req_nodes,
+				      SELECT_MODE_WILL_RUN);
 		job_ptr->details->shared = orig_shared;
 		if ((j != SLURM_SUCCESS) && (orig_shared != 0)) {
-			j = select_g_job_test(job_ptr, avail_bitmap,
-					min_nodes, max_nodes, req_nodes, 
-					SELECT_MODE_WILL_RUN);
-		}
+			FREE_NULL_BITMAP(avail_bitmap);
+			avail_bitmap= tmp_bitmap;
+			j = select_g_job_test(job_ptr, avail_bitmap, min_nodes,
+					      max_nodes, req_nodes,
+					      SELECT_MODE_WILL_RUN);
+		} else
+			FREE_NULL_BITMAP(tmp_bitmap);
 		if (j != SLURM_SUCCESS)
 			continue;	/* not runable */
-		if (job_ptr->start_time <= now) {
-			/* Start the job now */
-			_start_job(job_ptr, avail_bitmap);
+
+		if ((job_ptr->start_time <= now) &&
+		    (_start_job(job_ptr, avail_bitmap) != SLURM_SUCCESS)) {
+			/* Planned to start job, but something bad happened */
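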
 			break;
 		}
 		if (job_ptr->start_time > (now + BACKFILL_WINDOW)) {
@@ -471,50 +495,10 @@ static void _add_reservation(uint32_t start_time, uint32_t end_reserve,
 			break;
 	}
 
-#if 0
-	/* This records end of reservation so we maintain a full map
-	 * of when jobs start and end. Since we only care about starting 
-	 * jobs right now, the end of reservation time is not very useful
-	 * unless we want to track expected job initiation time, which 
-	 * would necessitate additional logic. */
-	for (j=0; ; ) {
-		if ((node_space[j].begin_time < end_reserve) &&
-		    (node_space[j].end_time   > end_reserve)) {
-			/* insert end entry record */
-			i = *node_space_recs;
-			node_space[i].begin_time = node_space[j].begin_time;
-			node_space[j].begin_time = end_reserve;
-			node_space[i].end_time = end_reserve;
-			node_space[i].avail_bitmap = 
-				bit_copy(node_space[j].avail_bitmap);
-			node_space[i].next = j;
-			node_space[previous].next = i;
-			(*node_space_recs)++;
-			break;
-		}
-		if (node_space[j].end_time == end_reserve) {
-			/* no need to insert end entry record */
-			break;
-		}
-		previous = j;
-		if ((j = node_space[j].next) == 0)
-			break;
-	}
-
-	for (j=0; ; ) {
-		if ((node_space[j].begin_time >= start_time) &&
-		    (node_space[j].end_time   <= end_reserve)) {
-			bit_and(node_space[j].avail_bitmap, res_bitmap);
-		}
-		if ((j = node_space[j].next) == 0)
-			break;
-	}
-#else
 	for (j=0; ; ) {
 		if (node_space[j].begin_time >= start_time)
 			bit_and(node_space[j].avail_bitmap, res_bitmap);
 		if ((j = node_space[j].next) == 0)
 			break;
 	}
-#endif
 }
diff --git a/src/plugins/sched/backfill/backfill_wrapper.c b/src/plugins/sched/backfill/backfill_wrapper.c
index cc73e722b..013e47351 100644
--- a/src/plugins/sched/backfill/backfill_wrapper.c
+++ b/src/plugins/sched/backfill/backfill_wrapper.c
@@ -63,15 +63,6 @@ static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
 /**************************************************************************/
 int init( void )
 {
-#ifdef HAVE_BG
-	/* Backfill scheduling on Blue Gene is possible, 
-	 * but difficult and would require substantial 
-	 * software development to accomplish. 
-	 * It would need to consider each job's geometry, 
-	 * ability to rotate, node-use (coprocessor or virtual)
-	 * and conn-type (mesh, torus or nav). */
-	fatal("Backfill scheduler incompatable with Blue Gene");
-#else
 	pthread_attr_t attr;
 
 	verbose( "Backfill scheduler plugin loaded" );
@@ -89,7 +80,7 @@ int init( void )
 		error("Unable to start backfill thread: %m");
 	pthread_mutex_unlock( &thread_flag_mutex );
 	slurm_attr_destroy( &attr );
-#endif
+
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/plugins/sched/wiki/msg.c b/src/plugins/sched/wiki/msg.c
index e0e18decf..94980a4ba 100644
--- a/src/plugins/sched/wiki/msg.c
+++ b/src/plugins/sched/wiki/msg.c
@@ -677,6 +677,7 @@ static void	_send_reply(slurm_fd new_fd, char *response)
 	checksum(sum, auth_key, (buf+20));   /* overwrite "CK=dummy..." above */
 	memcpy(buf, sum, 19);
 
+	i = strlen(buf) + 1;
 	(void) _send_msg(new_fd, buf, i);
 	xfree(buf);
 }
diff --git a/src/plugins/sched/wiki2/job_modify.c b/src/plugins/sched/wiki2/job_modify.c
index 714a38976..a7f203232 100644
--- a/src/plugins/sched/wiki2/job_modify.c
+++ b/src/plugins/sched/wiki2/job_modify.c
@@ -235,7 +235,7 @@ host_fini:	if (rc) {
 /* Modify a job:
  *	CMD=MODIFYJOB ARG=<jobid> PARTITION=<name> NODES=<number>
  *		DEPEND=afterany:<jobid> TIMELIMT=<seconds> BANK=<name>
- *		MINSTARTTIME=<uts> RFEATURES=<features>
+ *		JOBNAME=<name> MINSTARTTIME=<uts> RFEATURES=<features>
  * RET 0 on success, -1 on failure */
 extern int	job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg)
 {
diff --git a/src/plugins/sched/wiki2/msg.c b/src/plugins/sched/wiki2/msg.c
index 159535ebc..5d31a8873 100644
--- a/src/plugins/sched/wiki2/msg.c
+++ b/src/plugins/sched/wiki2/msg.c
@@ -738,6 +738,7 @@ static void	_send_reply(slurm_fd new_fd, char *response)
 	checksum(sum, auth_key, (buf+20));   /* overwrite "CK=dummy..." above */
 	memcpy(buf, sum, 19);
 
+	i = strlen(buf) + 1;
 	(void) _send_msg(new_fd, buf, i);
 	xfree(buf);
 }
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c
index 4a6e99f7b..c2981c764 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.c
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  block_allocator.c - Assorted functions for layout of bglblocks, 
  *	 wiring, mapping for smap, etc.
- *  $Id: block_allocator.c 14952 2008-09-03 16:08:14Z da $
+ *  $Id: block_allocator.c 15191 2008-09-26 15:25:46Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -96,6 +96,11 @@ s_p_options_t bg_conf_file_options[] = {
 	{NULL}
 };
 
+typedef enum {
+	BLOCK_ALGO_FIRST,
+	BLOCK_ALGO_SECOND
+} block_algo_t;
+
 #ifdef HAVE_BG
 /** internal helper functions */
 #ifdef HAVE_BG_FILES
@@ -153,7 +158,7 @@ static void _delete_path_list(void *object);
 static int _find_match(ba_request_t* ba_request, List results);
 
 /** */
-static bool _node_used(ba_node_t* ba_node, int *geometry);
+static bool _node_used(ba_node_t* ba_node, int x_size);
 
 /* */
 static void _switch_config(ba_node_t* source, ba_node_t* target, int dim, 
@@ -167,14 +172,9 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 static char *_set_internal_wires(List nodes, int size, int conn_type);
 
 /* */
-static int _find_x_path(List results, ba_node_t *ba_node, 
-			int *start, int *first, 
-			int *geometry, int found, int conn_type);
-
-/* */
-static int _find_x_path2(List results, ba_node_t *ba_node, 
-			 int *start, int *first, 
-			 int *geometry, int found, int conn_type);
+static int _find_x_path(List results, ba_node_t *ba_node, int *start,
+			int x_size, int found, int conn_type, 
+			block_algo_t algo);
 
 /* */
 static int _remove_node(List results, int *node_tar);
@@ -190,7 +190,7 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 /* 			     int count, int highest_phys_x);  */
 /* */
 static int _finish_torus(ba_switch_t *curr_switch, int source_port, 
-			   List nodes, int dim, int count, int *start);
+			 int dim, int count, int *start);
 /* */
 static int *_set_best_path();
 
@@ -1432,6 +1432,7 @@ end_it:
  * IN start - where to start the allocation.
  * IN geometry - the requested geometry of the block.
  * IN conn_type - mesh, torus, or small.
+ *
  * RET char * - hostlist of midplanes results represent must be
  *     xfreed.  NULL on failure
  */
@@ -1450,24 +1451,22 @@ extern char *set_bg_block(List results, int *start,
 	   || start[Y]>=DIM_SIZE[Y]
 	   || start[Z]>=DIM_SIZE[Z])
 		return NULL;
-	if(geometry[X]<=0 
-	   || geometry[Y]<=0
-	   || geometry[Z]<=0) {
+
+	if(geometry[X] <= 0 || geometry[Y] <= 0 || geometry[Z] <= 0) {
 		error("problem with geometry %c%c%c, needs to be at least 111",
 		      alpha_num[geometry[X]],
 		      alpha_num[geometry[Y]],
 		      alpha_num[geometry[Z]]);		      
 		return NULL;
 	}
+
 	size = geometry[X] * geometry[Y] * geometry[Z];
-	ba_node = &ba_system_ptr->
-		grid[start[X]][start[Y]][start[Z]];
+	ba_node = &ba_system_ptr->grid[start[X]][start[Y]][start[Z]];
 #else
 	if(start[X]>=DIM_SIZE[X])
 		return NULL;
 	size = geometry[X];
-	ba_node = &ba_system_ptr->
-			grid[start[X]];	
+	ba_node = &ba_system_ptr->grid[start[X]];	
 #endif
 	
 
@@ -1478,36 +1477,34 @@ extern char *set_bg_block(List results, int *start,
 		results = list_create(NULL);
 	else
 		send_results = 1;
-		
+	/* This midplane should already have been checked to see
+	   whether it is in use */
 	list_append(results, ba_node);
 	if(conn_type == SELECT_SMALL) {
 		/* adding the ba_node and ending */
 		ba_node->used = true;
-		name = xmalloc(4);
-		snprintf(name, 4, "%c%c%c",
-			 alpha_num[ba_node->coord[X]],
-			 alpha_num[ba_node->coord[Y]],
-			 alpha_num[ba_node->coord[Z]]);
+		name = xstrdup_printf("%c%c%c",
+				      alpha_num[ba_node->coord[X]],
+				      alpha_num[ba_node->coord[Y]],
+				      alpha_num[ba_node->coord[Z]]);
 		goto end_it; 
 	}
 	found = _find_x_path(results, ba_node,
 			     ba_node->coord, 
-			     ba_node->coord, 
-			     geometry, 
+			     geometry[X], 
 			     1,
-			     conn_type);
+			     conn_type, BLOCK_ALGO_FIRST);
 
 	if(!found) {
 		debug2("trying less efficient code");
 		remove_block(results, color_count);
 		list_delete_all(results, &empty_null_destroy_list, "");
 		list_append(results, ba_node);
-		found = _find_x_path2(results, ba_node,
-				      ba_node->coord,
-				      ba_node->coord,
-				      geometry,
-				      1,
-				      conn_type);
+		found = _find_x_path(results, ba_node,
+				     ba_node->coord,
+				     geometry[X],
+				     1,
+				     conn_type, BLOCK_ALGO_SECOND);
 	}
 	if(found) {
 #ifdef HAVE_BG
@@ -2709,10 +2706,25 @@ static int _append_geo(int *geometry, List geos, int rotate)
 }
 
 /*
+ * Fill in the paths and extra midplanes we need for the block.
+ * Basically copy the X path sent in with the start_list in each Y and
+ * Z dimension, filling in every midplane for the block and then
+ * completing the Y and Z wiring, tying the whole block together.
  *
+ * IN/OUT results - total list of midplanes after this function
+ *        returns successfully.  Should be
+ *        an exact copy of the start_list at first.
+ * IN start_list - exact copy of results at first.  This should only be
+ *        a list of midplanes on the X dim.  We will work off this and
+ *        the geometry to fill in this wiring for the X dim in all the
+ *        Y and Z coords.
+ * IN geometry - What the block looks like
+ * IN conn_type - Mesh or Torus
+ * 
+ * RET: 0 on failure, 1 on success
  */
 static int _fill_in_coords(List results, List start_list,
-			    int *geometry, int conn_type)
+			   int *geometry, int conn_type)
 {
 	ba_node_t *ba_node = NULL;
 	ba_node_t *check_node = NULL;
@@ -2722,8 +2734,9 @@ static int _fill_in_coords(List results, List start_list,
 	ba_switch_t *curr_switch = NULL; 
 	ba_switch_t *next_switch = NULL; 
 	
-	if(!start_list)
+	if(!start_list || !results)
 		return 0;
+	/* go through the start_list and add all the midplanes */
 	itr = list_iterator_create(start_list);
 	while((check_node = (ba_node_t*) list_next(itr))) {		
 		curr_switch = &check_node->axis_switch[X];
@@ -2744,18 +2757,23 @@ static int _fill_in_coords(List results, List start_list,
 					[check_node->coord[X]]
 					[check_node->coord[Y]+y]
 					[check_node->coord[Z]+z];
-				if(ba_node->coord[Y] 
-				   == check_node->coord[Y]
-				   && ba_node->coord[Z] 
-				   == check_node->coord[Z])
+
+				if(ba_node->coord[Y] == check_node->coord[Y]
+				   && ba_node->coord[Z] == check_node->coord[Z])
 					continue;
-				if (!_node_used(ba_node,geometry)) {
+
+				if (!_node_used(ba_node, geometry[X])) {
 					debug3("here Adding %c%c%c",
 					       alpha_num[ba_node->coord[X]],
 					       alpha_num[ba_node->coord[Y]],
 					       alpha_num[ba_node->coord[Z]]);
 					list_append(results, ba_node);
 					next_switch = &ba_node->axis_switch[X];
+					
+					/* since we are going off the
+					 * main system we can send NULL
+					 * here
+					 */
 					_copy_the_path(NULL, curr_switch, 
 						       next_switch, 
 						       0, X);
@@ -2773,7 +2791,7 @@ static int _fill_in_coords(List results, List start_list,
 	list_iterator_destroy(itr);
 	
 	itr = list_iterator_create(results);
-	while((ba_node = (ba_node_t*) list_next(itr))) {	
+	while((ba_node = (ba_node_t*) list_next(itr))) {
 		if(!_find_yz_path(ba_node, 
 				  check_node->coord, 
 				  geometry, 
@@ -2788,6 +2806,24 @@ failed:
 	return rc;
 }
 
+/*
+ * Copy a path through the wiring of a switch to another switch on a
+ * starting port on a dimension.
+ *
+ * IN/OUT: nodes - Local list of midplanes you are keeping track of.  If
+ *         you visit any new midplanes, a copy from ba_system_grid
+ *         will be added to the list.  If NULL, the path will be
+ *         set in mark_switch of the main virtual system (ba_system_grid).
+ * IN: curr_switch - The switch you want to copy the path of
+ * IN/OUT: mark_switch - The switch you want to fill in.  On success
+ *         this switch will contain a complete path from the curr_switch
+ *         starting from the source port.
+ * IN: source - source port number (If calling for the first time
+ *         should be 0 since we are looking for 1 at the end)
+ * IN: dim - Dimension XYZ
+ *
+ * RET: on success 1, on error 0
+ */
 static int _copy_the_path(List nodes, ba_switch_t *curr_switch, 
 			  ba_switch_t *mark_switch, 
 			  int source, int dim)
@@ -2798,7 +2834,8 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 	int port_tar, port_tar1;
 	ba_switch_t *next_switch = NULL; 
 	ba_switch_t *next_mark_switch = NULL; 
-	/*set the switch to not be used */
+       
+	/* Copy the source used and port_tar */
 	mark_switch->int_wire[source].used = 
 		curr_switch->int_wire[source].used;
 	mark_switch->int_wire[source].port_tar = 
@@ -2806,6 +2843,7 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 
 	port_tar = curr_switch->int_wire[source].port_tar;
 	
+	/* Now do the same thing from the other end */
 	mark_switch->int_wire[port_tar].used = 
 		curr_switch->int_wire[port_tar].used;
 	mark_switch->int_wire[port_tar].port_tar = 
@@ -2828,25 +2866,38 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 		       port_tar);	
 	
 	if(port_tar == 1) {
+		/* found the end of the line */
 		mark_switch->int_wire[1].used = 
 			curr_switch->int_wire[1].used;
 		mark_switch->int_wire[1].port_tar = 
 			curr_switch->int_wire[1].port_tar;
 		return 1;
 	}
-
+	
 	mark_node_tar = mark_switch->ext_wire[port_tar].node_tar;
 	port_tar = curr_switch->ext_wire[port_tar].port_tar;
 	
 	if(node_curr[X] == node_tar[X]
 	   && node_curr[Y] == node_tar[Y]
 	   && node_curr[Z] == node_tar[Z]) {
-		debug4("something bad happened!!");
+		/* We are going to the same node!  This should never
+		   happen */
+		debug4("something bad happened!! "
+		       "we are on %c%c%c and are going to it "
+		       "from port %d - > %d", 
+		       alpha_num[node_curr[X]],
+		       alpha_num[node_curr[Y]],
+		       alpha_num[node_curr[Z]],
+		       port_tar1, port_tar);
 		return 0;
 	}
+
+	/* see what the next switch is going to be */
 	next_switch = &ba_system_ptr->
 		grid[node_tar[X]][node_tar[Y]][node_tar[Z]].axis_switch[dim];
 	if(!nodes) {
+		/* If no nodes then just get the next switch to fill
+		   in from the main system */
 		next_mark_switch = &ba_system_ptr->
 			grid[mark_node_tar[X]]
 			[mark_node_tar[Y]]
@@ -2855,6 +2906,7 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 	} else {
 		ba_node_t *ba_node = NULL;
 		ListIterator itr = list_iterator_create(nodes);
+		/* see if we have already been to this node */
 		while((ba_node = list_next(itr))) {
 			if (ba_node->coord[X] == mark_node_tar[X] &&
 			    ba_node->coord[Y] == mark_node_tar[Y] &&
@@ -2863,6 +2915,7 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 		}
 		list_iterator_destroy(itr);
 		if(!ba_node) {
+			/* If not, grab a copy and add it to the list */
 			ba_node = ba_copy_node(&ba_system_ptr->
 					       grid[mark_node_tar[X]]
 					       [mark_node_tar[Y]]
@@ -2877,8 +2930,10 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 		next_mark_switch = &ba_node->axis_switch[dim];
 			
 	}
+
+	/* Keep going until we reach the end of the line */
 	return _copy_the_path(nodes, next_switch, next_mark_switch,
-		       port_tar, dim);
+			      port_tar, dim);
 }
 
 static int _find_yz_path(ba_node_t *ba_node, int *first, 
@@ -2893,23 +2948,19 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 
 	for(i2=1;i2<=2;i2++) {
 		if(geometry[i2] > 1) {
-			debug3("%d node %c%c%c"
-			       " port 2 -> ",
+			debug3("%d node %c%c%c port 2 -> ",
 			       i2,
 			       alpha_num[ba_node->coord[X]],
 			       alpha_num[ba_node->coord[Y]],
 			       alpha_num[ba_node->coord[Z]]);
 							       
-			dim_curr_switch = 
-				&ba_node->
-				axis_switch[i2];
+			dim_curr_switch = &ba_node->axis_switch[i2];
 			if(dim_curr_switch->int_wire[2].used) {
 				debug4("returning here");
 				return 0;
 			}
 							
-			node_tar = dim_curr_switch->
-				ext_wire[2].node_tar;
+			node_tar = dim_curr_switch->ext_wire[2].node_tar;
 							
 			next_node = &ba_system_ptr->
 				grid[node_tar[X]][node_tar[Y]][node_tar[Z]];
@@ -2925,27 +2976,32 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 			}
 			debug4("%d %d %d %d",i2, node_tar[i2],
 			       first[i2], geometry[i2]);
-			if(node_tar[i2] < first[i2])
-				count = DIM_SIZE[i2]-first[i2]+node_tar[i2];
-			else
-				count = node_tar[i2]+first[i2];
-			if((count) == (geometry[i2])) {
+
+			/* Here we need to see where we are in
+			 * reference to the geo of this dimension.  If
+			 * we have not yet gotten the number we need in
+			 * this direction, we just go to the next node
+			 * with 5 -> 1.  If we have all the midplanes
+			 * we need, then we go through and finish the
+			 * torus if needed.
+			 */
+			if(node_tar[i2] < first[i2]) 
+				count = node_tar[i2]+(DIM_SIZE[i2]-first[i2]);
+			else 
+				count = (node_tar[i2]-first[i2]);
+
+			if(count == geometry[i2]) {
 				debug4("found end of me %c%c%c",
 				       alpha_num[node_tar[X]],
 				       alpha_num[node_tar[Y]],
 				       alpha_num[node_tar[Z]]);
 				if(conn_type == SELECT_TORUS) {
-					dim_curr_switch->
-						int_wire[0].used = 1;
-					dim_curr_switch->
-						int_wire[0].port_tar
+					dim_curr_switch->int_wire[0].used = 1;
+					dim_curr_switch->int_wire[0].port_tar
 						= 2;
-					dim_curr_switch->
-						int_wire[2].used
-						= 1;
-					dim_curr_switch->
-						int_wire[2].
-						port_tar = 0;
+					dim_curr_switch->int_wire[2].used = 1;
+					dim_curr_switch->int_wire[2].port_tar
+						= 0;
 					dim_curr_switch = dim_next_switch;
 									
 					while(node_tar[i2] != first[i2]) {
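
The recomputed count above is the distance travelled from the starting coordinate in this
dimension, wrapping around when the target coordinate is numerically below the start; the two
branches are simply ring (modular) distance. A tiny check of that arithmetic:

#include <stdio.h>

/* Distance travelled from 'first' to 'tar' in a ring of 'dim_size' nodes,
 * equivalent to (tar - first + dim_size) % dim_size. */
static int ring_distance(int tar, int first, int dim_size)
{
	if (tar < first)
		return tar + (dim_size - first);	/* wrapped past the end */
	return tar - first;
}

int main(void)
{
	/* 4-midplane ring, starting at coordinate 2. */
	printf("%d\n", ring_distance(2, 2, 4));	/* 0: still at the start */
	printf("%d\n", ring_distance(3, 2, 4));	/* 1 */
	printf("%d\n", ring_distance(0, 2, 4));	/* 2: wrapped around */
	printf("%d\n", ring_distance(1, 2, 4));	/* 3 */
	return 0;
}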
@@ -3005,7 +3061,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 						port_tar = 5;
 				}
 								
-			} else {
+			} else if (count < geometry[i2]) {
 				if(conn_type == SELECT_TORUS || 
 				   (conn_type == SELECT_MESH && 
 				    (node_tar[i2] != first[i2]))) {
@@ -3032,7 +3088,40 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 						int_wire[1].port_tar
 						= 5;
 				}
+			} else {
+				error("We were only looking for %d "
+				      "in the %d dim, but now we have %d",
+				      geometry[i2], i2, count);
+				return 0;
 			}
+		} else if(geometry[i2] == 1) {
+			/* FIX ME: This is put here because we got
+			   into a state where the Y dim was not being
+			   processed correctly.  This will set up the
+			   0 -> 1 port correctly.  We should probably
+			   find out why this was happening in the
+			   first place though.  A reproducer was to
+			   have 
+			   BPs=[310x323] Type=TORUS
+			   BPs=[200x233] Type=TORUS
+			   BPs=[300x303] Type=TORUS
+			   BPs=[100x133] Type=TORUS
+			   BPs=[000x033] Type=TORUS
+			   BPs=[400x433] Type=TORUS
+			   and then add 
+			   BPs=[330x333] Type=TORUS
+			*/
+
+			dim_curr_switch = &ba_node->axis_switch[i2];
+			debug3("%d node %c%c%c port 0 -> 1",
+			       i2,
+			       alpha_num[ba_node->coord[X]],
+			       alpha_num[ba_node->coord[Y]],
+			       alpha_num[ba_node->coord[Z]]);
+			dim_curr_switch->int_wire[0].used = 1;
+			dim_curr_switch->int_wire[0].port_tar = 1;
+			dim_curr_switch->int_wire[1].used = 1;
+			dim_curr_switch->int_wire[1].port_tar = 0;
 		}
 	}
 	return 1;
@@ -3350,7 +3439,7 @@ start_again:
 #endif
 			;
 
-		if (!_node_used(ba_node, ba_request->geometry)) {
+		if (!_node_used(ba_node, ba_request->geometry[X])) {
 			debug3("trying this node %c%c%c %c%c%c %d",
 			       alpha_num[start[X]],
 			       alpha_num[start[Y]],
@@ -3419,10 +3508,15 @@ requested_end:
 	return 0;
 }
 
-/* bool _node_used(ba_node_t* ba_node, int geometry,  */
-static bool _node_used(ba_node_t* ba_node, int *geometry)
+/* 
+ * Used to check whether a midplane is usable in the block we are creating.
+ *
+ * IN: ba_node - the midplane to check
+ * IN: x_size - size of the block in the X dim, used to see whether the
+ *     X wires are already full, which would make this midplane unusable.
+ */
+static bool _node_used(ba_node_t* ba_node, int x_size)
 {
-	int i=0;
 	ba_switch_t* ba_switch = NULL;
 	
 	/* if we've used this node in another block already */
@@ -3433,17 +3527,27 @@ static bool _node_used(ba_node_t* ba_node, int *geometry)
 		       alpha_num[ba_node->coord[Z]]);
 		return true;
 	}
-	/* if we've used this nodes switches completely in another 
-	   block already */
-	for(i=0;i<1;i++) {
-		if(geometry[i]>1) {
-			ba_switch = &ba_node->axis_switch[i];
-			
-			if(ba_switch->int_wire[3].used 
-			   && ba_switch->int_wire[5].used) {
-				debug3("switch in use dim %d!",i);
-				return true;
-			}
+	/* Check if we've used this node's switches completely in another
+	   block already.  Right now we only need to look at
+	   the X dim since it is the only one with extra wires.  This
+	   can be extended to cover all the dims in the future if it is
+	   needed.  We only need to check this if we are planning on
+	   using more than 1 midplane in the block creation */
+	if(x_size > 1) {
+		/* get the switch of the X Dimension */
+		ba_switch = &ba_node->axis_switch[X];
+		
+		/* If both of these ports are used then the node
+		   is in use, since there are no more wires we
+		   can use: these cannot connect to each
+		   other, so they must be connected to the other ports.
+		*/
+		if(ba_switch->int_wire[3].used && ba_switch->int_wire[5].used) {
+			debug3("switch full in the X dim on node %c%c%c!",
+			       alpha_num[ba_node->coord[X]],
+			       alpha_num[ba_node->coord[Y]],
+			       alpha_num[ba_node->coord[Z]]);
+			return true;
 		}
 	}
 		
@@ -3844,7 +3948,7 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
 	name = xmalloc(BUFSIZE);
 	hostlist = hostlist_create(NULL);
 	itr = list_iterator_create(nodes);
-	while((ba_node[count] = (ba_node_t*) list_next(itr))) {
+	while((ba_node[count] = list_next(itr))) {
 		snprintf(temp_name, sizeof(temp_name), "%c%c%c", 
 			 alpha_num[ba_node[count]->coord[X]],
 			 alpha_num[ba_node[count]->coord[Y]],
@@ -3894,41 +3998,72 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
 	return name;
 }				
 
+/*
+ * Used to find a complete path based on the conn_type for the X dim.
+ * When starting to wire a block together this should be called first.
+ *
+ * IN/OUT: results - list of the midplanes we are potentially going to
+ *     use in the X dim.
+ * IN: ba_node - current node we are looking at and have already added
+ *     to results.
+ * IN: start - coordinates of the first midplane (so we know when to
+ *     end with a torus)
+ * IN: x_size - how many midplanes we are looking for in the X dim
+ * IN: found - count of how many midplanes we have found in the X dim
+ * IN: conn_type - MESH or TORUS
+ * IN: algo - algorithm to try the allocation with (see the usage
+ *     sketch below)
+ *
+ * RET: 0 on failure, 1 on success
+ */
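+
+/*
+ * A minimal usage sketch, for illustration only (the real caller lives
+ * elsewhere in this plugin); "start_node", "x_size" and "conn_type"
+ * stand in for whatever the caller uses.  Assuming the starting
+ * midplane has already been appended to results and counted as
+ * found = 1, the two wiring algorithms are tried in order:
+ *
+ *	if (!_find_x_path(results, start_node, start_node->coord,
+ *			  x_size, 1, conn_type, BLOCK_ALGO_FIRST)
+ *	    && !_find_x_path(results, start_node, start_node->coord,
+ *			      x_size, 1, conn_type, BLOCK_ALGO_SECOND))
+ *		return 0;	(no X path could be wired)
+ */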
 static int _find_x_path(List results, ba_node_t *ba_node, 
-	int *start, int *first, int *geometry, 
-	int found, int conn_type) 
+			int *start, int x_size, 
+			int found, int conn_type, block_algo_t algo) 
 {
 	ba_switch_t *curr_switch = NULL; 
 	ba_switch_t *next_switch = NULL; 
 	
 	int port_tar = 0;
 	int source_port=0;
-	int target_port=0;
+	int target_port=1;
 	int broke = 0, not_first = 0;
-	int ports_to_try[2] = {3,5};
+	int ports_to_try[2] = {4, 2};
 	int *node_tar = NULL;
 	int i = 0;
 	ba_node_t *next_node = NULL;
 	ba_node_t *check_node = NULL;
-/* 	int highest_phys_x = geometry[X] - start[X]; */
+/* 	int highest_phys_x = x_size - start[X]; */
 /* 	info("highest_phys_x is %d", highest_phys_x); */
 
-	ListIterator itr;
+	ListIterator itr = NULL;
 
-	if(!ba_node)
+	if(!ba_node || !results || !start)
 		return 0;
 
-	if(!source_port) {
-		target_port=1;
-		ports_to_try[0] = 4;
-		ports_to_try[1] = 2;
-			
-	}
 	curr_switch = &ba_node->axis_switch[X];
-	if(geometry[X] == 1) {
-		goto found_one;
+
+	/* we don't need to go any further */
+	if(x_size == 1) {
+		curr_switch->int_wire[source_port].used = 1;
+		curr_switch->int_wire[source_port].port_tar = target_port;
+		curr_switch->int_wire[target_port].used = 1;
+		curr_switch->int_wire[target_port].port_tar = source_port;
+		return 1;
 	}
-	debug3("found - %d",found);
+
+	if(algo == BLOCK_ALGO_FIRST) {
+		ports_to_try[0] = 4;
+		ports_to_try[1] = 2;
+	} else if(algo == BLOCK_ALGO_SECOND) {
+		ports_to_try[0] = 2;
+		ports_to_try[1] = 4;
+	} else {
+		error("Unknown algo %d", algo);
+		return 0;
+	}			
+	
+	debug3("Algo(%d) found - %d", algo, found);
+
+	/* Check the 2 ports we can leave through in ports_to_try */
 	for(i=0;i<2;i++) {
 /* 		info("trying port %d", ports_to_try[i]); */
 		/* check to make sure it isn't used */
@@ -3950,53 +4085,48 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 /* 			     port_tar); */
 			/* check to see if we are back at the start of the
 			   block */
-			if((node_tar[X] == 
-			    start[X] && 
-			    node_tar[Y] == 
-			    start[Y] && 
-			    node_tar[Z] == 
-			    start[Z])) {
+			if((node_tar[X] == start[X] 
+			    && node_tar[Y] == start[Y] 
+			    && node_tar[Z] == start[Z])) {
 				broke = 1;
 				goto broke_it;
 			}
 			/* check to see if the port points to itself */
-			if((node_tar[X] == 
-			    ba_node->coord[X] && 
-			    node_tar[Y] == 
-			    ba_node->coord[Y] && 
-			    node_tar[Z] == 
-			    ba_node->coord[Z])) {
+			if((node_tar[X] == ba_node->coord[X]
+			    && node_tar[Y] == ba_node->coord[Y]
+			    && node_tar[Z] == ba_node->coord[Z])) {
 				continue;
 			}
 			/* check to see if I am going to a place I have
 			   already been before */
 			itr = list_iterator_create(results);
-			while((next_node = (ba_node_t*) list_next(itr))) {
-				debug3("looking at %c%c%c and %c%c%c",
+			while((next_node = list_next(itr))) {
+				debug3("Algo(%d) looking at %c%c%c and %c%c%c",
+				       algo,
 				       alpha_num[next_node->coord[X]],
 				       alpha_num[next_node->coord[Y]],
 				       alpha_num[next_node->coord[Z]],
 				       alpha_num[node_tar[X]],
 				       alpha_num[node_tar[Y]],
 				       alpha_num[node_tar[Z]]);
-				if((node_tar[X] == next_node->coord[X] && 
-				    node_tar[Y] == next_node->coord[Y] && 
-				    node_tar[Z] == next_node->coord[Z])) {
+				if((node_tar[X] == next_node->coord[X] 
+				    && node_tar[Y] == next_node->coord[Y]
+				    && node_tar[Z] == next_node->coord[Z])) {
 					not_first = 1;
 					break;
 				}				
 			}
 			list_iterator_destroy(itr);
-			if(not_first && found<DIM_SIZE[X]) {
-				debug2("already been there before");
+			if(not_first && found < DIM_SIZE[X]) {
+				debug2("Algo(%d) already been there before",
+				       algo);
 				not_first = 0;
 				continue;
 			} 
 			not_first = 0;
 				
 		broke_it:
-			next_node = &ba_system_ptr->
-				grid[node_tar[X]]
+			next_node = &ba_system_ptr->grid[node_tar[X]]
 #ifdef HAVE_BG
 				[node_tar[Y]]
 				[node_tar[Z]]
@@ -4004,97 +4134,36 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				;
 			next_switch = &next_node->axis_switch[X];
 
- 			if((conn_type == SELECT_MESH) 
-			   && (found == (geometry[X]))) {
-				debug2("we found the end of the mesh");
+ 			if((conn_type == SELECT_MESH) && (found == (x_size))) {
+				debug2("Algo(%d) we found the end of the mesh",
+				       algo);
 				return 1;
 			}
-			debug3("Broke = %d Found = %d geometry[X] = %d",
-			       broke, found, geometry[X]);
+			debug3("Algo(%d) Broke = %d Found = %d x_size = %d",
+			       algo, broke, found, x_size);
 
-/* This doesnt' appear to be of any use since we are doing a circular
- * system not a linear one.  Kept just to make sure.
- */
-
-/* 			debug3("Next Phys X %d Highest X %d", */
-/* 			       next_node->phys_x, highest_phys_x); */
-/* 			if(next_node->phys_x >= highest_phys_x) { */
-/* 				debug3("looking for a passthrough"); */
-/* 				if(best_path) */
-/* 					list_destroy(best_path); */
-/* 				best_path = list_create(_delete_path_list); */
-/* 				if(path) */
-/* 					list_destroy(path); */
-/* 				path = list_create(_delete_path_list); */
-	
-/* 				_find_passthrough(curr_switch, */
-/* 						  0, */
-/* 						  results, */
-/* 						  X, */
-/* 						  0, */
-/* 						  highest_phys_x); */
-/* 				if(best_count < BEST_COUNT_INIT) { */
-/* 					debug2("yes found next free %d",  */
-/* 					       best_count); */
-/* 					node_tar = _set_best_path(); */
-/* 					next_node = &ba_system_ptr-> */
-/* 						grid[node_tar[X]] */
-/* #ifdef HAVE_BG */
-/* 						[node_tar[Y]] */
-/* 						[node_tar[Z]] */
-/* #endif */
-/* 						; */
-/* 					next_switch =  */
-/* 						&next_node->axis_switch[X]; */
-					
-/* #ifdef HAVE_BG */
-/* 					debug2("found %d looking at " */
-/* 					       "%c%c%c going to %c%c%c %d", */
-/* 					       found, */
-/* 					       alpha_num[ba_node->coord[X]], */
-/* 					       alpha_num[ba_node->coord[Y]], */
-/* 					       alpha_num[ba_node->coord[Z]], */
-/* 					       alpha_num[node_tar[X]], */
-/* 					       alpha_num[node_tar[Y]], */
-/* 					       alpha_num[node_tar[Z]], */
-/* 					       port_tar); */
-/* #endif		 */
-/* 					list_append(results, next_node); */
-/* 					found++; */
-/* 					if(_find_x_path(results, next_node,  */
-/* 							start, first, geometry, */
-/* 							found, conn_type)) { */
-/* 						return 1; */
-/* 					} else { */
-/* 						found--; */
-/* 						_reset_the_path(curr_switch, 0, */
-/* 								1, X); */
-/* 						_remove_node(results,  */
-/* 							     next_node->coord); */
-/* 						return 0; */
-/* 					} */
-/* 				} */
-/* 			}			 */
-
-			if(broke && (found == geometry[X])) {
+			if(broke && (found == x_size)) {
 				goto found_path;
-			} else if(found == geometry[X]) {
-				debug2("finishing the torus!");
+			} else if(found == x_size) {
+				debug2("Algo(%d) finishing the torus!", algo);
+
 				if(best_path)
-					list_destroy(best_path);
-				best_path = list_create(_delete_path_list);
+					list_flush(best_path);
+				else
+					best_path =
+						list_create(_delete_path_list);
+
 				if(path)
-					list_destroy(path);
-				path = list_create(_delete_path_list);
-				_finish_torus(curr_switch, 
-					      0, 
-					      results, 
-					      X, 
-					      0, 
-					      start);
+					list_flush(path);
+				else
+					path = list_create(_delete_path_list);
+				
+				_finish_torus(curr_switch, 0, X, 0, start);
+
 				if(best_count < BEST_COUNT_INIT) {
-					debug2("Found a best path with %d "
-					       "steps.", best_count);
+					debug2("Algo(%d) Found a best path "
+					       "with %d steps.",
+					       algo, best_count);
 					_set_best_path();
 					return 1;
 				} else {
@@ -4105,10 +4174,11 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				continue;
 			}
 
-			if (!_node_used(next_node, geometry)) {
+			if (!_node_used(next_node, x_size)) {
 #ifdef HAVE_BG
-				debug2("found %d looking at %c%c%c "
+				debug2("Algo(%d) found %d looking at %c%c%c "
 				       "%d going to %c%c%c %d",
+				       algo,
 				       found,
 				       alpha_num[ba_node->coord[X]],
 				       alpha_num[ba_node->coord[Y]],
@@ -4120,13 +4190,11 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				       port_tar);
 #endif
 				itr = list_iterator_create(results);
-				while((check_node = 
-				       (ba_node_t*) list_next(itr))) {
-					if((node_tar[X] == 
-					    check_node->coord[X] && 
-					    node_tar[Y] == 
-					    check_node->coord[Y] && 
-					    node_tar[Z] == 
+				while((check_node = list_next(itr))) {
+					if((node_tar[X] == check_node->coord[X]
+					    && node_tar[Y] == 
+					    check_node->coord[Y]
+					    && node_tar[Z] == 
 					    check_node->coord[Z])) {
 						break;
 					}
@@ -4134,7 +4202,8 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				list_iterator_destroy(itr);
 				if(!check_node) {
 #ifdef HAVE_BG
-					debug2("add %c%c%c",
+					debug2("Algo(%d) add %c%c%c",
+					       algo,
 					       alpha_num[next_node->coord[X]],
 					       alpha_num[next_node->coord[Y]],
 					       alpha_num[next_node->coord[Z]]);
@@ -4142,8 +4211,9 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 					list_append(results, next_node);
 				} else {
 #ifdef HAVE_BG
-					debug2("Hey this is already added "
-					       "%c%c%c",
+					debug2("Algo(%d) Hey this is already "
+					       "added %c%c%c",
+					       algo,
 					       alpha_num[node_tar[X]],
 					       alpha_num[node_tar[Y]],
 					       alpha_num[node_tar[Z]]);
@@ -4151,19 +4221,20 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 					continue;
 				}
 				found++;
-				
+
+				/* look for the next closest midplane */
 				if(!_find_x_path(results, next_node, 
-						 start, first, geometry, 
-						 found, conn_type)) {
-					_remove_node(results,
-						     next_node->coord);
+						 start, x_size, 
+						 found, conn_type, algo)) {
+					_remove_node(results, next_node->coord);
 					found--;
 					continue;
 				} else {
 				found_path:
 #ifdef HAVE_BG
-					debug2("added node %c%c%c %d %d -> "
-					       "%c%c%c %d %d",
+					debug2("Algo(%d) added node %c%c%c "
+					       "%d %d -> %c%c%c %d %d",
+					       algo,
 					       alpha_num[ba_node->coord[X]],
 					       alpha_num[ba_node->coord[Y]],
 					       alpha_num[ba_node->coord[Z]],
@@ -4175,341 +4246,104 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 					       port_tar,
 					       target_port);
 #endif					
-				found_one:			
-					if(geometry[X] != 1) {
-						curr_switch->
-							int_wire
-							[source_port].used = 1;
-						curr_switch->
-							int_wire
-							[source_port].port_tar
-							= ports_to_try[i];
-						curr_switch->
-							int_wire
-							[ports_to_try[i]].used
-							= 1;
-						curr_switch->
-							int_wire
-							[ports_to_try[i]].
-							port_tar = source_port;
+					curr_switch->int_wire[source_port].used
+						= 1;
+					curr_switch->int_wire
+						[source_port].port_tar
+						= ports_to_try[i];
+					curr_switch->int_wire
+						[ports_to_try[i]].used = 1;
+					curr_switch->int_wire
+						[ports_to_try[i]].port_tar 
+						= source_port;
 					
-						next_switch->
-							int_wire[port_tar].used
-							= 1;
-						next_switch->
-							int_wire
-							[port_tar].port_tar
-							= target_port;
-						next_switch->
-							int_wire
-							[target_port].used = 1;
-						next_switch->
-							int_wire
-							[target_port].port_tar
-							= port_tar;
-					}
+					next_switch->int_wire[port_tar].used
+						= 1;
+					next_switch->int_wire[port_tar].port_tar
+						= target_port;
+					next_switch->int_wire[target_port].used
+						= 1;
+					next_switch->int_wire
+						[target_port].port_tar
+						= port_tar;
 					return 1;
-
 				}
 			} 			
 		}
 	}
 
-	debug2("couldn't find path");
-	return 0;
-}
-
-static int _find_x_path2(List results, ba_node_t *ba_node, 
-			 int *start, int *first, int *geometry, 
-			 int found, int conn_type) 
-{
-	ba_switch_t *curr_switch = NULL; 
-	ba_switch_t *next_switch = NULL; 
-	
-	int port_tar = 0;
-	int source_port=0;
-	int target_port=0;
-	int broke = 0, not_first = 0;
-	int ports_to_try[2] = {3,5};
-	int *node_tar = NULL;
-	int i = 0;
-	ba_node_t *next_node = NULL;
-	ba_node_t *check_node = NULL;
-	
-	ListIterator itr;
-	
-	if(!ba_node)
+	if(algo == BLOCK_ALGO_FIRST) {
+		debug2("Algo(%d) couldn't find path", algo);
 		return 0;
-
-	if(!source_port) {
-		target_port=1;
-		ports_to_try[0] = 2;
-		ports_to_try[1] = 4;
+	} else if(algo == BLOCK_ALGO_SECOND) {
+#ifdef HAVE_BG
+		debug2("Algo(%d) looking for the next free node "
+		       "starting at %c%c%c",
+		       algo,
+		       alpha_num[ba_node->coord[X]],
+		       alpha_num[ba_node->coord[Y]],
+		       alpha_num[ba_node->coord[Z]]);
+#endif
+		
+		if(best_path)
+			list_flush(best_path);
+		else
+			best_path = list_create(_delete_path_list);
+		
+		if(path)
+			list_flush(path);
+		else
+			path = list_create(_delete_path_list);
+		
+		_find_next_free_using_port_2(curr_switch, 0, results, X, 0);
+		
+		if(best_count < BEST_COUNT_INIT) {
+			debug2("Algo(%d) yes found next free %d", algo,
+			       best_count);
+			node_tar = _set_best_path();
 			
-	}
-	curr_switch = &ba_node->axis_switch[X];
-	if(geometry[X] == 1) {
-		goto found_one;
-	}
-	debug2("found - %d",found);
-	for(i=0;i<2;i++) {
-		/* check to make sure it isn't used */
-		if(!curr_switch->int_wire[ports_to_try[i]].used) {
-			node_tar = curr_switch->
-				ext_wire[ports_to_try[i]].node_tar;
-			port_tar = curr_switch->
-				ext_wire[ports_to_try[i]].port_tar;
-			if((node_tar[X] == 
-			    start[X] && 
-			    node_tar[Y] == 
-			    start[Y] && 
-			    node_tar[Z] == 
-			    start[Z])) {
-				broke = 1;
-				goto broke_it;
-			}
-			if((node_tar[X] == 
-			    ba_node->coord[X] && 
-			    node_tar[Y] == 
-			    ba_node->coord[Y] && 
-			    node_tar[Z] == 
-			    ba_node->coord[Z])) {
-				continue;
-			}
-			itr = list_iterator_create(results);
-			while((next_node = (ba_node_t*) list_next(itr))) {
-				if((node_tar[X] == 
-				    next_node->coord[X] && 
-				    node_tar[Y] == 
-				    next_node->coord[Y] && 
-				    node_tar[Z] == 
-				    next_node->coord[Z])) {
-					not_first = 1;
-					break;
-				}
-				
-			}
-			list_iterator_destroy(itr);
-			if(not_first && found<DIM_SIZE[X]) {
-				not_first = 0;
-				continue;
-			} 
-			not_first = 0;
-				
-		broke_it:
-			next_node = &ba_system_ptr->
-				grid[node_tar[X]]
+			next_node = &ba_system_ptr->grid[node_tar[X]]
 #ifdef HAVE_BG
 				[node_tar[Y]]
 				[node_tar[Z]]
 #endif
 				;
-
+			
 			next_switch = &next_node->axis_switch[X];
-		
 			
- 			if((conn_type == SELECT_MESH) 
-			   && (found == (geometry[X]))) {
-				debug2("we found the end of the mesh");
+#ifdef HAVE_BG
+			debug2("Algo(%d) found %d looking at %c%c%c "
+			       "going to %c%c%c %d",
+			       algo, found,
+			       alpha_num[ba_node->coord[X]],
+			       alpha_num[ba_node->coord[Y]],
+			       alpha_num[ba_node->coord[Z]],
+			       alpha_num[node_tar[X]],
+			       alpha_num[node_tar[Y]],
+			       alpha_num[node_tar[Z]],
+			       port_tar);
+#endif		
+			list_append(results, next_node);
+			found++;
+			if(_find_x_path(results, next_node, 
+					start, x_size, found,
+					conn_type, algo)) {
 				return 1;
+			} else {
+				found--;
+				_reset_the_path(curr_switch, 0, 1, X);
+				_remove_node(results, next_node->coord);
+				debug2("Algo(%d) couldn't finish "
+				       "the path off this one", algo);
 			}
-			debug3("Broke = %d Found = %d geometry[X] = %d",
-			       broke, found, geometry[X]);
-			if(broke && (found == geometry[X])) {
-				goto found_path;
-			} else if(found == geometry[X]) {
-				debug2("finishing the torus!");
-				if(best_path)
-					list_destroy(best_path);
-				best_path = list_create(_delete_path_list);
-				if(path)
-					list_destroy(path);
-				path = list_create(_delete_path_list);
-				_finish_torus(curr_switch, 
-					      0, 
-					      results, 
-					      X, 
-					      0, 
-					      start);
-				if(best_count < BEST_COUNT_INIT) {
-					debug2("Found a best path with %d "
-					       "steps.", best_count);
-					_set_best_path();
-					return 1;
-				} else {
-					return 0;
-				}
-			} else if(broke) {
-				broke = 0;
-				continue;
-			}
-
-			if (!_node_used(next_node, geometry)) {
-#ifdef HAVE_BG
-				debug2("found %d looking at %c%c%c "
-				       "%d going to %c%c%c %d",
-				       found,
-				       alpha_num[ba_node->coord[X]],
-				       alpha_num[ba_node->coord[Y]],
-				       alpha_num[ba_node->coord[Z]],
-				       ports_to_try[i],
-				       alpha_num[node_tar[X]],
-				       alpha_num[node_tar[Y]],
-				       alpha_num[node_tar[Z]],
-				       port_tar);
-#endif
-				itr = list_iterator_create(results);
-				while((check_node = 
-				       (ba_node_t*) list_next(itr))) {
-					if((node_tar[X] == 
-					    check_node->coord[X] && 
-					    node_tar[Y] == 
-					    check_node->coord[Y] && 
-					    node_tar[Z] == 
-					    check_node->coord[Z])) {
-						break;
-					}
-				}
-				list_iterator_destroy(itr);
-				if(!check_node) {
-#ifdef HAVE_BG
-					debug2("add %c%c%c",
-					       alpha_num[next_node->coord[X]],
-					       alpha_num[next_node->coord[Y]],
-					       alpha_num[next_node->coord[Z]]);
-#endif					       
-					list_append(results, next_node);
-				} else {
-#ifdef HAVE_BG
-					debug2("Hey this is already added "
-					       "%c%c%c",
-					       alpha_num[node_tar[X]],
-					       alpha_num[node_tar[Y]],
-					       alpha_num[node_tar[Z]]);
-#endif
-					continue;
-				}
-				found++;
-				
-				if(!_find_x_path2(results, next_node, 
-						 start, first, geometry, 
-						 found, conn_type)) {
-					_remove_node(results,
-						     next_node->coord);
-					found--;
-					continue;
-				} else {
-				found_path:
-#ifdef HAVE_BG
-					debug2("added node %c%c%c %d %d -> "
-					       "%c%c%c %d %d",
-					       alpha_num[ba_node->coord[X]],
-					       alpha_num[ba_node->coord[Y]],
-					       alpha_num[ba_node->coord[Z]],
-					       source_port,
-					       ports_to_try[i],
-					       alpha_num[node_tar[X]],
-					       alpha_num[node_tar[Y]],
-					       alpha_num[node_tar[Z]],
-					       port_tar,
-					       target_port);
-#endif					
-				found_one:			
-					if(geometry[X] != 1) {
-						curr_switch->
-							int_wire
-							[source_port].used = 1;
-						curr_switch->
-							int_wire
-							[source_port].port_tar
-							= ports_to_try[i];
-						curr_switch->
-							int_wire
-							[ports_to_try[i]].used
-							= 1;
-						curr_switch->
-							int_wire
-							[ports_to_try[i]].
-							port_tar = source_port;
-					
-						next_switch->
-							int_wire[port_tar].used
-							= 1;
-						next_switch->
-							int_wire
-							[port_tar].port_tar
-							= target_port;
-						next_switch->
-							int_wire
-							[target_port].used = 1;
-						next_switch->
-							int_wire
-							[target_port].port_tar
-							= port_tar;
-					}
-					return 1;
-				}
-			} 			
-		}
+		} 
+		
+		debug2("Algo(%d) couldn't find path", algo);
+		return 0;
 	}
-#ifdef HAVE_BG
-	debug2("looking for the next free node starting at %c%c%c",
-	       alpha_num[ba_node->coord[X]],
-	       alpha_num[ba_node->coord[Y]],
-	       alpha_num[ba_node->coord[Z]]);
-#endif
-
-	if(best_path)
-		list_destroy(best_path);
-	best_path = list_create(_delete_path_list);
-	if(path)
-		list_destroy(path);
-	path = list_create(_delete_path_list);
-	
-	_find_next_free_using_port_2(curr_switch, 
-				     0, 
-				     results, 
-				     X, 
-				     0);
-	if(best_count < BEST_COUNT_INIT) {
-		debug2("yes found next free %d", best_count);
-		node_tar = _set_best_path();
-
-		next_node = &ba_system_ptr->
-			grid[node_tar[X]]
-#ifdef HAVE_BG
-			[node_tar[Y]]
-			[node_tar[Z]]
-#endif
-			;
 
-		next_switch = &next_node->axis_switch[X];
-		
-#ifdef HAVE_BG
-		debug2("found %d looking at %c%c%c going to %c%c%c %d",
-		       found,
-		       alpha_num[ba_node->coord[X]],
-		       alpha_num[ba_node->coord[Y]],
-		       alpha_num[ba_node->coord[Z]],
-		       alpha_num[node_tar[X]],
-		       alpha_num[node_tar[Y]],
-		       alpha_num[node_tar[Z]],
-		       port_tar);
-#endif		
-		list_append(results, next_node);
-		found++;
-		if(_find_x_path2(results, next_node, 
-				start, first, geometry, found, conn_type)) {
-			return 1;
-		} else {
-			found--;
-			_reset_the_path(curr_switch, 0, 1, X);
-			_remove_node(results, next_node->coord);
-			debug2("couldn't finish the path off this one");
-		}
-	} 
-	
-	debug2("couldn't find path 2");
+	error("Reached an unknown algo(%d), this should never happen", algo);
 	return 0;
 }
 
@@ -4614,8 +4448,8 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 		if((source_port%2))
 			target_port=1;
 		
-		list_destroy(best_path);
-		best_path = list_create(_delete_path_list);
+		list_flush(best_path);
+		
 		found = true;
 		path_add->out = target_port;
 		list_push(path, path_add);
@@ -4634,7 +4468,7 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 			temp_switch->dim = path_switch->dim;
 			temp_switch->in = path_switch->in;
 			temp_switch->out = path_switch->out;
-			list_append(best_path,temp_switch);
+			list_append(best_path, temp_switch);
 		}
 		list_iterator_destroy(itr);
 		best_count = count;
@@ -4711,205 +4545,28 @@ return_0:
 	return 0;
 }
 
-/* static int _find_passthrough(ba_switch_t *curr_switch, int source_port,  */
-/* 			     List nodes, int dim, int count, int highest_phys_x)  */
-/* { */
-/* 	ba_switch_t *next_switch = NULL;  */
-/* 	ba_path_switch_t *path_add =  */
-/* 		(ba_path_switch_t *) xmalloc(sizeof(ba_path_switch_t)); */
-/* 	ba_path_switch_t *path_switch = NULL; */
-/* 	ba_path_switch_t *temp_switch = NULL; */
-/* 	int port_tar; */
-/* 	int target_port = 0; */
-/* 	int ports_to_try[2] = {3,5}; */
-/* 	int *node_tar= curr_switch->ext_wire[0].node_tar; */
-/* 	int *node_src = curr_switch->ext_wire[0].node_tar; */
-/* 	int i; */
-/* 	int used=0; */
-/* 	int broke = 0; */
-/* 	ba_node_t *ba_node = NULL; */
-	
-/* 	ListIterator itr; */
-/* 	static bool found = false; */
-
-/* 	path_add->geometry[X] = node_src[X]; */
-/* #ifdef HAVE_BG */
-/* 	path_add->geometry[Y] = node_src[Y]; */
-/* 	path_add->geometry[Z] = node_src[Z]; */
-/* #endif */
-/* 	path_add->dim = dim; */
-/* 	path_add->in = source_port; */
-	
-/* 	if(count>=best_count) { */
-/* 		xfree(path_add); */
-/* 		return 0; */
-/* 	} */
-
-/* 	itr = list_iterator_create(nodes); */
-/* 	while((ba_node = (ba_node_t*) list_next(itr))) { */
-		
-/* #ifdef HAVE_BG */
-/* 		if(node_tar[X] == ba_node->coord[X]  */
-/* 		   && node_tar[Y] == ba_node->coord[Y]  */
-/* 		   && node_tar[Z] == ba_node->coord[Z]) { */
-/* 			broke = 1; */
-/* 			break; */
-/* 		} */
-/* #else */
-/* 		if(node_tar[X] == ba_node->coord[X]) { */
-/* 			broke = 1; */
-/* 			break; */
-/* 		} */
-/* #endif */
-		
-/* 	} */
-/* 	list_iterator_destroy(itr); */
-/* 	ba_node = &ba_system_ptr-> */
-/* 		grid[node_tar[X]] */
-/* #ifdef HAVE_BG */
-/* 		[node_tar[Y]] */
-/* 		[node_tar[Z]] */
-/* #endif */
-/* 		; */
-/* 	if(!broke && count>0 */
-/* 	   && !ba_node->used  */
-/* 	   && (ba_node->phys_x < highest_phys_x)) { */
-		
-/* 		debug3("this one not found %c%c%c", */
-/* 		       alpha_num[node_tar[X]], */
-/* 		       alpha_num[node_tar[Y]], */
-/* 		       alpha_num[node_tar[Z]]); */
-		
-/* 		broke = 0; */
-				
-/* 		if((source_port%2)) */
-/* 			target_port=1; */
-		
-/* 		list_destroy(best_path); */
-/* 		best_path = list_create(_delete_path_list); */
-/* 		found = true; */
-/* 		path_add->out = target_port; */
-/* 		list_push(path, path_add); */
-		
-/* 		itr = list_iterator_create(path); */
-/* 		while((path_switch = (ba_path_switch_t*) list_next(itr))){ */
-		
-/* 			temp_switch = (ba_path_switch_t *)  */
-/* 				xmalloc(sizeof(ba_path_switch_t)); */
-			 
-/* 			temp_switch->geometry[X] = path_switch->geometry[X]; */
-/* #ifdef HAVE_BG */
-/* 			temp_switch->geometry[Y] = path_switch->geometry[Y]; */
-/* 			temp_switch->geometry[Z] = path_switch->geometry[Z]; */
-/* #endif */
-/* 			temp_switch->dim = path_switch->dim; */
-/* 			temp_switch->in = path_switch->in; */
-/* 			temp_switch->out = path_switch->out; */
-/* 			list_append(best_path,temp_switch); */
-/* 		} */
-/* 		list_iterator_destroy(itr); */
-/* 		best_count = count; */
-/* 		return 1; */
-/* 	}  */
-
-/* 	if(source_port==0 || source_port==3 || source_port==5) { */
-/* 		if(count==0) { */
-/* 			ports_to_try[0] = 2; */
-/* 			ports_to_try[1] = 4;	 */
-/* 		} else { */
-/* 			ports_to_try[0] = 4; */
-/* 			ports_to_try[1] = 2;	 */
-/* 		} */
-/* 	} */
-			
-/* 	for(i=0;i<2;i++) { */
-/* 		used=0; */
-/* 		if(!curr_switch->int_wire[ports_to_try[i]].used) { */
-/* 			itr = list_iterator_create(path); */
-/* 			while((path_switch =  */
-/* 			       (ba_path_switch_t*) list_next(itr))){ */
-				
-/* 				if(((path_switch->geometry[X] == node_src[X])  */
-/* #ifdef HAVE_BG */
-/* 				    && (path_switch->geometry[Y]  */
-/* 					== node_src[Y]) */
-/* 				    && (path_switch->geometry[Z]  */
-/* 					== node_tar[Z]) */
-/* #endif */
-/* 					   )) { */
-					
-/* 					if( path_switch->out */
-/* 					    == ports_to_try[i]) { */
-/* 						used = 1; */
-/* 						break; */
-/* 					} */
-/* 				} */
-/* 			} */
-/* 			list_iterator_destroy(itr); */
-			
-/* 			if(curr_switch-> */
-/* 			   ext_wire[ports_to_try[i]].node_tar[X] */
-/* 			   == curr_switch->ext_wire[0].node_tar[X]   */
-/* #ifdef HAVE_BG */
-/* 			   && curr_switch-> */
-/* 			   ext_wire[ports_to_try[i]].node_tar[Y]  */
-/* 			   == curr_switch->ext_wire[0].node_tar[Y]  */
-/* 			   && curr_switch-> */
-/* 			   ext_wire[ports_to_try[i]].node_tar[Z]  */
-/* 			   == curr_switch->ext_wire[0].node_tar[Z] */
-/* #endif */
-/* 				) { */
-/* 				continue; */
-/* 			} */
-						
-/* 			if(!used) { */
-/* 				port_tar = curr_switch-> */
-/* 					ext_wire[ports_to_try[i]].port_tar; */
-/* 				node_tar = curr_switch-> */
-/* 					ext_wire[ports_to_try[i]].node_tar; */
-				
-/* 				next_switch = &ba_system_ptr-> */
-/* 					grid[node_tar[X]] */
-/* #ifdef HAVE_BG */
-/* 					[node_tar[Y]] */
-/* 					[node_tar[Z]] */
-/* #endif */
-/* 					.axis_switch[X]; */
-				
-/* 				count++; */
-/* 				path_add->out = ports_to_try[i]; */
-/* 				list_push(path, path_add); */
-/* 				debug3("looking at this one " */
-/* 				       "%c%c%c %d -> %c%c%c %d", */
-/* 				       alpha_num[ba_node->coord[X]], */
-/* 				       alpha_num[ba_node->coord[Y]], */
-/* 				       alpha_num[ba_node->coord[Z]], */
-/* 				       ports_to_try[i], */
-/* 				       alpha_num[node_tar[X]], */
-/* 				       alpha_num[node_tar[Y]], */
-/* 				       alpha_num[node_tar[Z]], */
-/* 				       port_tar); */
-		
-/* 				_find_passthrough(next_switch, port_tar, nodes, */
-/* 						dim, count, highest_phys_x); */
-/* 				while((temp_switch = list_pop(path))  */
-/* 				      != path_add){ */
-/* 					xfree(temp_switch); */
-/* 					debug3("something here 2"); */
-/* 				} */
-/* 			} */
-/* 		} */
-/* 	} */
-/* 	xfree(path_add); */
-/* 	return 0; */
-/* } */
+/*
+ * Used to tie the end of the block to the start. best_path and path
+ * should both be set up before calling this function.
+ *
+ * IN: curr_switch - X-axis switch of the midplane we are currently on
+ * IN: source_port - port this switch was entered on (0 on the first call)
+ * IN: dim - dimension being wired (currently always X)
+ * IN: count - number of steps taken so far (0 on the first call)
+ * IN: start - coordinates of the starting midplane we are trying to
+ *     wrap back to
+ *
+ * RET: 0 on failure, 1 on success
+ *
+ * Sets up the global variables best_path and best_count.  On success
+ * best_count will be less than BEST_COUNT_INIT, and _set_best_path()
+ * can then be called to apply this path to the main system
+ * (ba_system_ptr).
+ */
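+
+/*
+ * A minimal sketch of how the caller in _find_x_path() drives this
+ * routine: best_path and path are (re)initialized, the torus is closed
+ * starting from the X switch of the current midplane, and the result
+ * is applied only if a path was actually recorded:
+ *
+ *	if (best_path)
+ *		list_flush(best_path);
+ *	else
+ *		best_path = list_create(_delete_path_list);
+ *	if (path)
+ *		list_flush(path);
+ *	else
+ *		path = list_create(_delete_path_list);
+ *
+ *	_finish_torus(curr_switch, 0, X, 0, start);
+ *	if (best_count < BEST_COUNT_INIT)
+ *		_set_best_path();
+ */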
 
 static int _finish_torus(ba_switch_t *curr_switch, int source_port,
-			 List nodes, int dim, int count, int *start)
+			 int dim, int count, int *start)
 {
 	ba_switch_t *next_switch = NULL;
-	ba_path_switch_t *path_add =
-		(ba_path_switch_t *) xmalloc(sizeof(ba_path_switch_t));
+	ba_path_switch_t *path_add = xmalloc(sizeof(ba_path_switch_t));
 	ba_path_switch_t *path_switch = NULL;
 	ba_path_switch_t *temp_switch = NULL;
 	int port_tar;
@@ -4945,18 +4602,16 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
 			target_port=1;
 		if(!curr_switch->int_wire[target_port].used) {
 			
-			list_destroy(best_path);
-			best_path = list_create(_delete_path_list);
+			list_flush(best_path);
+			
 			found = true;
 			path_add->out = target_port;
 			list_push(path, path_add);
 			
 			itr = list_iterator_create(path);
-			while((path_switch =
-			       (ba_path_switch_t*) list_next(itr))){
+			while((path_switch = list_next(itr))) {
 				
-				temp_switch = (ba_path_switch_t *)
-					xmalloc(sizeof(ba_path_switch_t));
+				temp_switch = xmalloc(sizeof(ba_path_switch_t));
 				
 				temp_switch->geometry[X] =
 					path_switch->geometry[X];
@@ -4986,8 +4641,7 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
 		used=0;
 		if(!curr_switch->int_wire[ports_to_try[i]].used) {
 			itr = list_iterator_create(path);
-			while((path_switch =
-			       (ba_path_switch_t*) list_next(itr))){
+			while((path_switch = list_next(itr))){
 				
 				if(((path_switch->geometry[X] == node_src[X])
 #ifdef HAVE_BG
@@ -5022,8 +4676,7 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
 				node_tar = curr_switch->
 					ext_wire[ports_to_try[i]].node_tar;
 				
-				next_switch = &ba_system_ptr->
-					grid[node_tar[X]]
+				next_switch = &ba_system_ptr->grid[node_tar[X]]
 #ifdef HAVE_BG
 					[node_tar[Y]]
 					[node_tar[Z]]
@@ -5034,8 +4687,8 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
 				count++;
 				path_add->out = ports_to_try[i];
 				list_push(path, path_add);
-				_finish_torus(next_switch, port_tar, nodes,
-						dim, count, start);
+				_finish_torus(next_switch, port_tar, 
+					      dim, count, start);
 				while((temp_switch = list_pop(path))
 				      != path_add){
 					xfree(temp_switch);
@@ -5048,14 +4701,22 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
        return 0;
 }
 
+/*
+ * Uses best_path, set up previously by _finish_torus or
+ * _find_next_free_using_port_2, to apply the path contained there
+ * to the main virtual system.  It will also set the passthrough
+ * flag if a passthrough was used.
+ */
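+
+/*
+ * A minimal sketch of the typical call sequence, taken from the
+ * callers above: once best_path has been filled and best_count has
+ * dropped below BEST_COUNT_INIT, the path is applied and the returned
+ * coordinates are used to locate the next midplane in the grid
+ * (the Y/Z indices only exist on HAVE_BG builds):
+ *
+ *	if (best_count < BEST_COUNT_INIT) {
+ *		node_tar = _set_best_path();
+ *		next_node = &ba_system_ptr->grid[node_tar[X]]
+ *			[node_tar[Y]][node_tar[Z]];
+ *	}
+ */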
 static int *_set_best_path()
 {
 	ListIterator itr;
 	ba_path_switch_t *path_switch = NULL;
 	ba_switch_t *curr_switch = NULL; 
 	int *geo = NULL;
+
 	if(!best_path)
 		return NULL;
+
 	itr = list_iterator_create(best_path);
 	while((path_switch = (ba_path_switch_t*) list_next(itr))) {
 		if(passthrough && path_switch->in > 1 && path_switch->out > 1) {
@@ -5070,15 +4731,13 @@ static int *_set_best_path()
 		       path_switch->in, path_switch->out);
 		if(!geo)
 			geo = path_switch->geometry;
-		curr_switch = &ba_system_ptr->
-			grid
+		curr_switch = &ba_system_ptr->grid
 			[path_switch->geometry[X]]
 			[path_switch->geometry[Y]]
 			[path_switch->geometry[Z]].  
 			axis_switch[path_switch->dim];
 #else
-		curr_switch = &ba_system_ptr->
-			grid[path_switch->geometry[X]].
+		curr_switch = &ba_system_ptr->grid[path_switch->geometry[X]].
 			axis_switch[path_switch->dim];
 #endif
 	
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index 4bc37379f..5d9e9c7ef 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -2,7 +2,7 @@
  *  bg_job_place.c - blue gene job placement (e.g. base block selection)
  *  functions.
  *
- *  $Id: bg_job_place.c 14952 2008-09-03 16:08:14Z da $ 
+ *  $Id: bg_job_place.c 15372 2008-10-10 15:52:40Z da $ 
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -83,7 +83,7 @@ static bg_record_t *_find_matching_block(List block_list,
 					 bitstr_t* slurm_block_bitmap,
 					 ba_request_t *request,
 					 uint32_t max_procs,
-					 int allow, int check_image,
+					 int *allow, int check_image,
 					 int overlap_check,
 					 List overlapped_list,
 					 bool test_only);
@@ -365,7 +365,7 @@ static bg_record_t *_find_matching_block(List block_list,
 					 bitstr_t* slurm_block_bitmap,
 					 ba_request_t *request,
 					 uint32_t max_procs,
-					 int allow, int check_image,
+					 int *allow, int check_image,
 					 int overlap_check,
 					 List overlapped_list,
 					 bool test_only)
@@ -380,7 +380,7 @@ static bg_record_t *_find_matching_block(List block_list,
 	      test_only);
 		
 	itr = list_iterator_create(block_list);
-	while ((bg_record = (bg_record_t*) list_next(itr))) {		
+	while ((bg_record = list_next(itr))) {		
 		/* If test_only we want to fall through to tell the 
 		   scheduler that it is runnable just not right now. 
 		*/
@@ -454,22 +454,22 @@ static bg_record_t *_find_matching_block(List block_list,
 			if(request->blrtsimage &&
 			   strcasecmp(request->blrtsimage,
 				      bg_record->blrtsimage)) {
-				allow = 1;
+				*allow = 1;
 				continue;
 			} else if(request->linuximage &&
 			   strcasecmp(request->linuximage,
 				      bg_record->linuximage)) {
-				allow = 1;
+				*allow = 1;
 				continue;
 			} else if(request->mloaderimage &&
 			   strcasecmp(request->mloaderimage, 
 				      bg_record->mloaderimage)) {
-				allow = 1;
+				*allow = 1;
 				continue;
 			} else if(request->ramdiskimage &&
 			   strcasecmp(request->ramdiskimage,
 				      bg_record->ramdiskimage)) {
-				allow = 1;
+				*allow = 1;
 				continue;
 			}			
 		}
@@ -636,6 +636,8 @@ static int _check_for_booted_overlapping_blocks(
 					 * bg_record
 					*/
 					list_remove(bg_record_itr);
+					slurm_mutex_lock(&block_state_mutex);
+
 					if(bg_record->original) {
 						debug3("This was a copy");
 						found_record =
@@ -651,8 +653,10 @@ static int _check_for_booted_overlapping_blocks(
 					}
 					destroy_bg_record(bg_record);
 					if(!found_record) {
-						error("1 this record wasn't "
-						      "found in the list!");
+						debug2("This record wasn't "
+						       "found in the bg_list, "
+						       "no big deal, it "
+						       "probably wasn't added");
 						//rc = SLURM_ERROR;
 					} else {
 						List temp_list =
@@ -663,6 +667,7 @@ static int _check_for_booted_overlapping_blocks(
 						free_block_list(temp_list);
 						list_destroy(temp_list);
 					}
+					slurm_mutex_unlock(&block_state_mutex);
 				} 
 				rc = 1;
 					
@@ -977,7 +982,7 @@ static int _find_best_block_match(List block_list,
 						 slurm_block_bitmap,
 						 &request,
 						 max_procs,
-						 allow, check_image,
+						 &allow, check_image,
 						 overlap_check, 
 						 overlapped_list,
 						 test_only);
@@ -1228,7 +1233,7 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	uint16_t tmp16 = (uint16_t)NO_VAL;
 	List block_list = NULL;
 	int blocks_added = 0;
-	int starttime = time(NULL);
+	time_t starttime = time(NULL);
 	bool test_only;
 
 	if (mode == SELECT_MODE_TEST_ONLY || mode == SELECT_MODE_WILL_RUN)
@@ -1380,7 +1385,7 @@ extern int test_job_list(List req_list)
 //	uint16_t tmp16 = (uint16_t)NO_VAL;
 	List block_list = NULL;
 	int blocks_added = 0;
-	int starttime = time(NULL);
+	time_t starttime = time(NULL);
 	ListIterator itr = NULL;
 	select_will_run_t *will_run = NULL;
 
@@ -1420,10 +1425,22 @@ extern int test_job_list(List req_list)
 		
 		if(rc == SLURM_SUCCESS) {
 			if(bg_record) {
-				if(bg_record->job_ptr
-				   && bg_record->job_ptr->end_time) {
-					starttime =
-						bg_record->job_ptr->end_time;
+				/* Here we check whether a job is still
+				 * running on the block.  Since some jobs
+				 * take a while to finish, we need to make
+				 * sure the end time is in the future.  If
+				 * it isn't (meaning it is in the past or
+				 * at the current time) we add 5 seconds
+				 * so we don't use the block immediately.
+				 */
+				if(bg_record->job_ptr 
+				   && bg_record->job_ptr->end_time) { 
+					if(bg_record->job_ptr->end_time <= 
+					   starttime)
+						starttime += 5;
+					else {
+						starttime = bg_record->
+							    job_ptr->end_time;
+					}
 				}
 				bg_record->job_running =
 					will_run->job_ptr->job_id;
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/plugin/bg_job_run.c
index 20f326d46..37141f15b 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_run.c
@@ -2,7 +2,7 @@
  *  bg_job_run.c - blue gene job execution (e.g. initiation and termination) 
  *  functions.
  *
- *  $Id: bg_job_run.c 14938 2008-08-29 21:49:01Z da $ 
+ *  $Id: bg_job_run.c 15085 2008-09-16 20:24:05Z da $ 
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -546,7 +546,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 				      slurm_strerror(rc));
 				job_fail(bg_update_ptr->job_ptr->job_id);
 			}
-			lock_slurmctld(job_write_lock);
+			unlock_slurmctld(job_write_lock);
 
 			slurm_mutex_unlock(&job_start_mutex);
 			return;
@@ -1183,7 +1183,21 @@ extern int boot_block(bg_record_t *bg_record)
 	    != STATUS_OK) {
 		error("bridge_create_block(%s): %s",
 		      bg_record->bg_block_id, bg_err_str(rc));
-		
+		if(rc == INCOMPATIBLE_STATE) {
+			char reason[128], time_str[32];
+			time_t now = time(NULL);
+			slurm_make_time_str(&now, time_str, sizeof(time_str));
+			snprintf(reason, sizeof(reason),
+				 "boot_block: "
+				 "Block %s is in an incompatible state.  "
+				 "This usually means hardware is allocated "
+				 "by another block (maybe outside of SLURM). "
+				 "[SLURM@%s]", 
+				 bg_record->bg_block_id, time_str);
+			drain_as_needed(bg_record, reason);
+			bg_record->boot_state = 0;
+			bg_record->boot_count = 0;
+		}
 		return SLURM_ERROR;
 	}
 	
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.h b/src/plugins/select/bluegene/plugin/bg_record_functions.h
index 0c5486931..d168814d1 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.h
+++ b/src/plugins/select/bluegene/plugin/bg_record_functions.h
@@ -72,11 +72,11 @@ typedef struct bg_record {
 					   being modified or not at
 					   job launch usually */
 	uid_t user_uid;   		/* Owner of block uid	*/
-	rm_partition_state_t state;   	/* the allocated block   */
+	rm_partition_state_t state;     /* Current state of the block */
 	int start[BA_SYSTEM_DIMENSIONS];/* start node */
 	uint16_t geo[BA_SYSTEM_DIMENSIONS];  /* geometry */
-	rm_connection_type_t conn_type;	/* Mesh or Torus or NAV */
-	rm_partition_mode_t node_use;	/* either COPROCESSOR or VIRTUAL */
+	rm_connection_type_t conn_type;  /* MESH, TORUS or NAV */
+	rm_partition_mode_t node_use;	 /* either COPROCESSOR or VIRTUAL */
 	rm_partition_t *bg_block;       /* structure to hold info from db2 */
 	List bg_block_list;             /* node list of blocks in block */
 	int bp_count;                   /* size */
diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c
index 95d731690..b12227dca 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.c
+++ b/src/plugins/select/bluegene/plugin/bluegene.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  bluegene.c - blue gene node configuration processing module. 
  *
- *  $Id: bluegene.c 14952 2008-09-03 16:08:14Z da $
+ *  $Id: bluegene.c 15370 2008-10-09 23:00:27Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -488,7 +488,6 @@ extern bg_record_t *find_and_remove_org_from_bg_list(List my_list,
 		if(bit_equal(bg_record->bitmap, found_record->bitmap)
 		   && bit_equal(bg_record->ionode_bitmap,
 				found_record->ionode_bitmap)) {
-			
 			if(!strcmp(bg_record->bg_block_id,
 				   found_record->bg_block_id)) {
 				list_remove(itr);
diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.c b/src/plugins/select/bluegene/plugin/dynamic_block.c
index c83042801..19d279b59 100644
--- a/src/plugins/select/bluegene/plugin/dynamic_block.c
+++ b/src/plugins/select/bluegene/plugin/dynamic_block.c
@@ -232,7 +232,7 @@ no_list:
 		else
 			results = list_create(NULL);
 		if (!allocate_block(request, results)) {
-			debug("allocate failure for size %d base partitions", 
+			debug2("allocate failure for size %d base partitions", 
 			       request->size);
 			rc = SLURM_ERROR;
 		}
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index 79b595cdb..7841fd699 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -2,7 +2,7 @@
  *  select_cons_res.c - node selection plugin supporting consumable 
  *  resources policies.
  *
- *  $Id: select_cons_res.c 14907 2008-08-26 22:27:26Z jette $
+ *  $Id: select_cons_res.c 15113 2008-09-19 00:35:14Z jette $
  *****************************************************************************\
  *
  *  The following example below illustrates how four jobs are allocated
@@ -2300,6 +2300,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	bitstr_t *orig_map;
 	int rc = SLURM_ERROR;
 	uint16_t saved_state;
+	time_t now = time(NULL);
 
 	orig_map = bit_copy(bitmap);
 
@@ -2357,7 +2358,10 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 			       req_nodes, SELECT_MODE_WILL_RUN, job_node_req,
 			       exp_node_cr);
 		if (rc == SLURM_SUCCESS) {
-			job_ptr->start_time = tmp_job_ptr->end_time;
+			if (tmp_job_ptr->end_time <= now)
+				 job_ptr->start_time = now + 1;
+			else
+				job_ptr->start_time = tmp_job_ptr->end_time;
 			break;
 		}
 	}
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index 787580d21..8c5f3e421 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -1538,6 +1538,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	bitstr_t *orig_map;
 	int i, rc = SLURM_ERROR;
 	int max_run_jobs = max_share - 1;	/* exclude this job */
+	time_t now = time(NULL);
 
 	orig_map = bit_copy(bitmap);
 
@@ -1596,7 +1597,10 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 			       req_nodes);
 		if (rc != SLURM_SUCCESS)
 			continue;
-		job_ptr->start_time = tmp_job_ptr->end_time;
+		if (tmp_job_ptr->end_time <= now)
+			job_ptr->start_time = now + 1;
+		else
+			job_ptr->start_time = tmp_job_ptr->end_time;
 		break;
 	}
 	list_iterator_destroy(job_iterator);
diff --git a/src/sacct/options.c b/src/sacct/options.c
index b7c2737fc..98bbde8c4 100644
--- a/src/sacct/options.c
+++ b/src/sacct/options.c
@@ -414,7 +414,7 @@ void _help_msg(void)
 	       "-c, --completion\n"
 	       "    Use job completion instead of accounting data.\n"
 	       "-C, --cluster\n"
-	       "    Only send data about this cluster.\n"
+	       "    Only send data about this cluster.  Use -1 for all clusters.\n"
 	       "-d, --dump\n"
 	       "    Dump the raw data records\n"
 	       "--duplicates\n"
@@ -622,7 +622,7 @@ void parse_command_line(int argc, char **argv)
 	char *dot = NULL;
 	bool brief_output = FALSE, long_output = FALSE;
 	bool all_users = 0;
-
+	bool all_clusters = 0;
 	static struct option long_options[] = {
 		{"all", 0,0, 'a'},
 		{"accounts", 1, 0, 'A'},
@@ -684,12 +684,17 @@ void parse_command_line(int argc, char **argv)
 			brief_output = true;
 			break;
 		case 'B':
-			params.opt_begin = parse_time(optarg);
+			params.opt_begin = parse_time(optarg, 1);
 			break;
 		case 'c':
 			params.opt_completion = 1;
 			break;
 		case 'C':
+			if(!strcasecmp(optarg, "-1")) {
+				all_clusters = 1;
+				break;
+			}
+			all_clusters = 0;
 			if(!params.opt_cluster_list) 
 				params.opt_cluster_list =
 					list_create(slurm_destroy_char);
@@ -746,7 +751,7 @@ void parse_command_line(int argc, char **argv)
 		break;
 		
 		case 'E':
-			params.opt_end = parse_time(optarg);
+			params.opt_end = parse_time(optarg, 1);
 			break;
 		case 'F':
 			if(params.opt_stat)
@@ -834,6 +839,7 @@ void parse_command_line(int argc, char **argv)
 				all_users = 1;
 				break;
 			}
+			all_users = 0;
 			if(!params.opt_uid_list)
 				params.opt_uid_list = 
 					list_create(slurm_destroy_char);
@@ -921,7 +927,7 @@ void parse_command_line(int argc, char **argv)
 		xfree(acct_type);
 	} else {
 		slurm_acct_storage_init(params.opt_filein);
-		acct_db_conn = acct_storage_g_get_connection(false, false);
+		acct_db_conn = acct_storage_g_get_connection(false, 0, false);
 		
 		acct_type = slurm_get_accounting_storage_type();
 		if ((strcmp(acct_type, "accounting_storage/none") == 0)
@@ -934,7 +940,15 @@ void parse_command_line(int argc, char **argv)
 	}
 
 	/* specific clusters requested? */
-	if (params.opt_verbose && params.opt_cluster_list 
+	if(all_clusters) {
+		if(params.opt_cluster_list 
+		   && list_count(params.opt_cluster_list)) {
+			list_destroy(params.opt_cluster_list);
+			params.opt_cluster_list = NULL;
+		}
+		if(params.opt_verbose)
+			fprintf(stderr, "Clusters requested:\n\t: all\n");
+	} else if (params.opt_verbose && params.opt_cluster_list 
 	    && list_count(params.opt_cluster_list)) {
 		fprintf(stderr, "Clusters requested:\n");
 		itr = list_iterator_create(params.opt_cluster_list);
@@ -1123,9 +1137,6 @@ void do_dump(void)
 	
 	itr = list_iterator_create(jobs);
 	while((job = list_next(itr))) {
-		if (params.opt_uid>=0)
-			if (job->uid != params.opt_uid)
-				continue;
 		if(job->sacct.min_cpu == (float)NO_VAL)
 			job->sacct.min_cpu = 0;
 		
@@ -1298,13 +1309,11 @@ void do_dump_completion(void)
 		       job->gid_name, job->node_cnt, job->nodelist, 
 		       job->jobname, job->state,
 		       job->timelimit);
-#ifdef HAVE_BG
 		if(job->blockid)
 			printf(" %s %s %s %s %u %s %s",
 			       job->blockid, job->connection, job->reboot,
 			       job->rotate, job->max_procs, job->geo,
 			       job->bg_start_point);
-#endif
 		printf("\n");
 	}
 	list_iterator_destroy(itr);
@@ -1380,6 +1389,8 @@ void do_list(void)
 			while((step = list_next(itr_step))) {
 				if(step->end == 0)
 					step->end = job->end;
+				step->associd = job->associd;
+				step->cluster = job->cluster;
 				step->account = job->account;
 				print_fields(JOBSTEP, step);
 			} 
diff --git a/src/sacct/print.c b/src/sacct/print.c
index a1023454e..58fdd010c 100644
--- a/src/sacct/print.c
+++ b/src/sacct/print.c
@@ -133,7 +133,7 @@ void print_elapsed(type_t type, void *object)
 		printf("%-15s", "Elapsed");
 		break;
 	case UNDERSCORE:
-		printf("%-15s", "---------------");
+		printf("%-15.15s", "---------------");
 		break;
 	case JOB:
 		_elapsed_time(job->elapsed, 0, str);
@@ -257,19 +257,19 @@ void print_job(type_t type, void *object)
 
 	switch(type) {
 	case HEADLINE:
-		printf("%-8s", "Job");
+		printf("%-12s", "Job");
 		break;
 	case UNDERSCORE:
-		printf("%-8s", "--------");
+		printf("%-12s", "------------");
 		break;
 	case JOB:
-		printf("%-8u", job->jobid);
+		printf("%-12u", job->jobid);
 		break;
 	case JOBSTEP:
-		printf("%-8s", " ");
+		printf("%-12s", " ");
 		break;
 	default:
-		printf("%-8s", "n/a");
+		printf("%-12s", "n/a");
 		break;
 	} 
 }
@@ -490,7 +490,6 @@ void print_partition(type_t type, void *object)
 	} 
 }
 
-#ifdef HAVE_BG
 void print_blockid(type_t type, void *object)
 { 
 	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
@@ -529,7 +528,6 @@ void print_blockid(type_t type, void *object)
 		break;
 	} 
 }
-#endif
 
 void print_pages(type_t type, void *object)
 { 
@@ -706,7 +704,7 @@ void print_submit(type_t type, void *object)
 		printf("%-14s", "Submit Time");
 		break;
 	case UNDERSCORE:
-		printf("%-14s", "--------------");
+		printf("%-14.14s", "--------------");
 		break;
 	case JOB:
 		slurm_make_time_str(&job->submit, 
@@ -731,32 +729,32 @@ void print_start(type_t type, void *object)
 	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
 	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
 	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char time_str[32];
+	char time_str[19];
 	
 	switch(type) {
 	case HEADLINE:
-		printf("%-19s", "Start Time");
+		printf("%-14s", "Start Time");
 		break;
 	case UNDERSCORE:
-		printf("%-19s", "--------------------");
+		printf("%-14.14s", "-------------------");
 		break;
 	case JOB:
 		slurm_make_time_str(&job->start, 
 				    time_str, 
 				    sizeof(time_str));
-		printf("%-19s", time_str);
+		printf("%-14s", time_str);
 		break;
 	case JOBCOMP:
-		printf("%-19s", jobcomp->start_time);
+		printf("%-14s", jobcomp->start_time);
 		break;
 	case JOBSTEP:
 		slurm_make_time_str(&step->start, 
 				    time_str, 
 				    sizeof(time_str));
-		printf("%-19s", time_str);
+		printf("%-14s", time_str);
 		break;
 	default:
-		printf("%-19s", "n/a");
+		printf("%-14s", "n/a");
 		break;
 	} 
 }
@@ -790,28 +788,28 @@ void print_end(type_t type, void *object)
 	
 	switch(type) {
 	case HEADLINE:
-		printf("%-19s", "End Time");
+		printf("%-14s", "End Time");
 		break;
 	case UNDERSCORE:
-		printf("%-19s", "--------------------");
+		printf("%-14.14s", "--------------------");
 		break;
 	case JOB:
 		slurm_make_time_str(&job->end, 
 				    time_str, 
 				    sizeof(time_str));
-		printf("%-19s", time_str);
+		printf("%-14s", time_str);
 		break;
 	case JOBCOMP:
-		printf("%-19s", jobcomp->end_time);
+		printf("%-14s", jobcomp->end_time);
 		break;
 	case JOBSTEP:
 		slurm_make_time_str(&step->end, 
 				    time_str, 
 				    sizeof(time_str));
-		printf("%-19s", time_str);
+		printf("%-14s", time_str);
 		break;
 	default:
-		printf("%-19s", "n/a");
+		printf("%-14s", "n/a");
 		break;
 	} 
 }
@@ -1074,7 +1072,7 @@ void print_account(type_t type, void *object)
 
 	switch(type) {
 	case HEADLINE:
-		printf("%-16s", "account");
+		printf("%-16s", "Account");
 		break;
 	case UNDERSCORE:
 		printf("%-16s", "----------------");
@@ -1102,8 +1100,72 @@ void print_account(type_t type, void *object)
 	}
 }
 
+void print_assoc(type_t type, void *object)
+{
+	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
+	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
+
+	switch(type) {
+	case HEADLINE:
+		printf("%-16s", "AssociationID");
+		break;
+	case UNDERSCORE:
+		printf("%-16s", "----------------");
+		break;
+	case JOB:
+		if(!job->associd)
+			printf("%-16s", "unknown");
+		else 
+			printf("%-16u", job->associd);
+		break;
+	case JOBSTEP:
+		if(!step->associd)
+			printf("%-16s", "unknown");
+		else 
+			printf("%-16u", step->associd);
+		break;
+	default:
+		printf("%-16s", "n/a");
+		break;
+	}
+}
+
+void print_cluster(type_t type, void *object)
+{
+	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
+	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
+
+	switch(type) {
+	case HEADLINE:
+		printf("%-16s", "Cluster");
+		break;
+	case UNDERSCORE:
+		printf("%-16s", "----------------");
+		break;
+	case JOB:
+		if(!job->cluster)
+			printf("%-16s", "unknown");
+		else if(strlen(job->cluster)<17)
+			printf("%-16s", job->cluster);
+		else
+			printf("%-13.13s...", job->cluster);
+		break;
+	case JOBSTEP:
+		if(!step->cluster)
+			printf("%-16s", "unknown");
+		else if(strlen(step->cluster)<17)
+			printf("%-16s", step->cluster);
+		else
+			printf("%-13.13s...", step->cluster);
+		break;
+	default:
+		printf("%-16s", "n/a");
+		break;
+	}
+}
 
-#ifdef HAVE_BG
 void print_connection(type_t type, void *object)
 {
 	jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object;
@@ -1218,5 +1280,4 @@ void print_bg_start_point(type_t type, void *object)
 		break;
 	}
 }
-#endif
 
diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c
index f1b968dda..851977ce1 100644
--- a/src/sacct/sacct.c
+++ b/src/sacct/sacct.c
@@ -153,6 +153,8 @@ void _print_header(void);
  */
 sacct_parameters_t params;
 fields_t fields[] = {{"account", print_account},
+		     {"associd", print_assoc},
+		     {"cluster", print_cluster},
 		     {"cpu", print_cpu},
 		     {"cputime", print_cputime}, 
 		     {"elapsed", print_elapsed},
@@ -183,7 +185,6 @@ fields_t fields[] = {{"account", print_account},
 		     {"user", print_user}, 
 		     {"usercpu", print_usercpu}, 
 		     {"vsize", print_vsize}, 
-#ifdef HAVE_BG
 		     {"blockid", print_blockid}, 
 		     {"connection", print_connection}, 
 		     {"geo", print_geo}, 
@@ -191,7 +192,6 @@ fields_t fields[] = {{"account", print_account},
 		     {"reboot", print_reboot}, 
 		     {"rotate", print_rotate}, 
 		     {"bg_start_point", print_bg_start_point}, 		     
-#endif
 		     {NULL, NULL}};
 
 List jobs = NULL;
diff --git a/src/sacct/sacct.h b/src/sacct/sacct.h
index 23966f2d2..40d166744 100644
--- a/src/sacct/sacct.h
+++ b/src/sacct/sacct.h
@@ -142,15 +142,16 @@ void print_usercpu(type_t type, void *object);
 void print_vsize(type_t type, void *object);
 void print_cputime(type_t type, void *object);
 void print_account(type_t type, void *object);
+void print_assoc(type_t type, void *object);
+void print_cluster(type_t type, void *object);
+
 
-#ifdef HAVE_BG
 void print_connection(type_t type, void *object);
 void print_geo(type_t type, void *object);
 void print_max_procs(type_t type, void *object);
 void print_reboot(type_t type, void *object);
 void print_rotate(type_t type, void *object);
 void print_bg_start_point(type_t type, void *object);
-#endif
 
 /* options.c */
 int decode_state_char(char *state);
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index 725d9ca98..eb656fd7f 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -47,7 +47,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int u_set = 0;
 	int end = 0;
 	List qos_list = NULL;
-
+	acct_association_cond_t *assoc_cond = NULL;
+		     
 	if(!acct_cond) {
 		exit_code=1;
 		fprintf(stderr, "No acct_cond given");
@@ -57,42 +58,51 @@ static int _set_cond(int *start, int argc, char *argv[],
 	if(!acct_cond->assoc_cond) {
 		acct_cond->assoc_cond = 
 			xmalloc(sizeof(acct_association_cond_t));
-		acct_cond->assoc_cond->fairshare = NO_VAL;
-		acct_cond->assoc_cond->max_cpu_secs_per_job = NO_VAL;
-		acct_cond->assoc_cond->max_jobs = NO_VAL;
-		acct_cond->assoc_cond->max_nodes_per_job = NO_VAL;
-		acct_cond->assoc_cond->max_wall_duration_per_job = NO_VAL;
 	}
 
+	assoc_cond = acct_cond->assoc_cond;
+
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!strncasecmp (argv[i], "Set", 3)) {
 			i--;
 			break;
-		} else if (!strncasecmp (argv[i], "WithAssoc", 5)) {
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithAssoc", 5)) {
 			acct_cond->with_assocs = 1;
-		} else if (!strncasecmp (argv[i], "WithCoordinators", 5)) {
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithCoordinators", 5)) {
 			acct_cond->with_coords = 1;
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithRawQOS", 5)) {
+			assoc_cond->with_raw_qos = 1;
+		} else if (!end && !strncasecmp (argv[i], "WOPInfo", 4)) {
+			assoc_cond->without_parent_info = 1;
+		} else if (!end && !strncasecmp (argv[i], "WOPLimits", 4)) {
+			assoc_cond->without_parent_limits = 1;
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithSubAccounts", 5)) {
+			assoc_cond->with_sub_accts = 1;
 		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
 			continue;
 		} else if(!end
 			  || !strncasecmp (argv[i], "Names", 1)
 			  || !strncasecmp (argv[i], "Accouts", 1)) {
-			if(!acct_cond->assoc_cond->acct_list) {
-				acct_cond->assoc_cond->acct_list = 
+			if(!assoc_cond->acct_list) {
+				assoc_cond->acct_list = 
 					list_create(slurm_destroy_char);
 			}
 			if(slurm_addto_char_list(
-				   acct_cond->assoc_cond->acct_list,
+				   assoc_cond->acct_list,
 				   argv[i]+end)) 
 				u_set = 1;
 		} else if (!strncasecmp (argv[i], "Clusters", 1)) {
-			if(!acct_cond->assoc_cond->cluster_list) {
-				acct_cond->assoc_cond->cluster_list = 
+			if(!assoc_cond->cluster_list) {
+				assoc_cond->cluster_list = 
 					list_create(slurm_destroy_char);
 			}
 			if(slurm_addto_char_list(
-				   acct_cond->assoc_cond->cluster_list,
+				   assoc_cond->cluster_list,
 				   argv[i]+end))
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "Descriptions", 1)) {
@@ -106,6 +116,105 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else if (!strncasecmp (argv[i], "Format", 1)) {
 			if(format_list)
 				slurm_addto_char_list(format_list, argv[i]+end);
+		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
+			if(!assoc_cond->fairshare_list)
+				assoc_cond->fairshare_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->fairshare_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCPUMins", 7)) {
+			if(!assoc_cond->grp_cpu_mins_list)
+				assoc_cond->grp_cpu_mins_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_cpu_mins_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCpus", 7)) {
+			if(!assoc_cond->grp_cpus_list)
+				assoc_cond->grp_cpus_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_cpus_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpJobs", 4)) {
+			if(!assoc_cond->grp_jobs_list)
+				assoc_cond->grp_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_jobs_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpNodes", 4)) {
+			if(!assoc_cond->grp_nodes_list)
+				assoc_cond->grp_nodes_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_nodes_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpSubmitJobs", 4)) {
+			if(!assoc_cond->grp_submit_jobs_list)
+				assoc_cond->grp_submit_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->grp_submit_jobs_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpWall", 4)) {
+			if(!assoc_cond->grp_wall_list)
+				assoc_cond->grp_wall_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->grp_wall_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCPUMins", 7)) {
+			if(!assoc_cond->max_cpu_mins_pj_list)
+				assoc_cond->max_cpu_mins_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_cpu_mins_pj_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCpus", 7)) {
+			if(!assoc_cond->max_cpus_pj_list)
+				assoc_cond->max_cpus_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_cpus_pj_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
+			if(!assoc_cond->max_jobs_list)
+				assoc_cond->max_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_jobs_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
+			if(!assoc_cond->max_nodes_pj_list)
+				assoc_cond->max_nodes_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_nodes_pj_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobs", 4)) {
+			if(!assoc_cond->max_submit_jobs_list)
+				assoc_cond->max_submit_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_submit_jobs_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
+			if(!assoc_cond->max_wall_pj_list)
+				assoc_cond->max_wall_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_wall_pj_list,
+				   argv[i]+end))
+				a_set = 1;
 		} else if (!strncasecmp (argv[i], "Organizations", 1)) {
 			if(!acct_cond->organization_list) {
 				acct_cond->organization_list = 
@@ -115,13 +224,17 @@ static int _set_cond(int *start, int argc, char *argv[],
 						 argv[i]+end))
 				u_set = 1;
 		} else if (!strncasecmp (argv[i], "Parent", 1)) {
-			acct_cond->assoc_cond->parent_acct =
-				strip_quotes(argv[i]+end, NULL);
-			a_set = 1;
+			if(!assoc_cond->parent_acct_list) {
+				assoc_cond->parent_acct_list = 
+					list_create(slurm_destroy_char);
+			}
+			if(slurm_addto_char_list(assoc_cond->parent_acct_list,
+						 argv[i]+end))
+				a_set = 1;
 		} else if (!strncasecmp (argv[i], "QosLevel", 1)) {
 			int option = 0;
-			if(!acct_cond->qos_list) {
-				acct_cond->qos_list = 
+			if(!assoc_cond->qos_list) {
+				assoc_cond->qos_list = 
 					list_create(slurm_destroy_char);
 			}
 			
@@ -130,9 +243,16 @@ static int _set_cond(int *start, int argc, char *argv[],
 					db_conn, my_uid, NULL);
 			}
 			
-			addto_qos_char_list(acct_cond->qos_list, qos_list,
-					    argv[i]+end, option);
-			u_set = 1;
+			if(end > 2 && argv[i][end-1] == '='
+			   && (argv[i][end-2] == '+' 
+			       || argv[i][end-2] == '-'))
+				option = (int)argv[i][end-2];
+
+			if(addto_qos_char_list(assoc_cond->qos_list, qos_list,
+					       argv[i]+end, option))
+				a_set = 1;
+			else
+				exit_code = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
@@ -157,6 +277,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 }
 
 static int _set_rec(int *start, int argc, char *argv[],
+		    List acct_list,
+		    List cluster_list,
 		    acct_account_rec_t *acct,
 		    acct_association_rec_t *assoc)
 {
@@ -173,35 +295,107 @@ static int _set_rec(int *start, int argc, char *argv[],
 			break;
 		} else if(!end && !strncasecmp(argv[i], "set", 3)) {
 			continue;
-		} else if(!end) {
-			exit_code=1;
-			fprintf(stderr, 
-				" Bad format on %s: End your option with "
-			       "an '=' sign\n", argv[i]);
+		} else if(!end
+			  || !strncasecmp (argv[i], "Account", 1)
+			  || !strncasecmp (argv[i], "Names", 1)) {
+			if(acct_list) 
+				slurm_addto_char_list(acct_list, argv[i]+end);
+				
+		} else if (!strncasecmp (argv[i], "Cluster", 1)) {
+			if(cluster_list)
+				slurm_addto_char_list(cluster_list,
+						      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Description", 1)) {
 			acct->description =  strip_quotes(argv[i]+end, NULL);
 			u_set = 1;
 		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
+			if(!assoc)
+				continue;
 			if (get_uint(argv[i]+end, &assoc->fairshare, 
 				     "FairShare") == SLURM_SUCCESS)
 				a_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxCPUSec", 4)) {
-			if (get_uint(argv[i]+end, &assoc->max_cpu_secs_per_job,
-				     "MaxCPUSec") == SLURM_SUCCESS)
+		} else if (!strncasecmp (argv[i], "GrpCPUMins", 7)) {
+			if(!assoc)
+				continue;
+			if (get_uint64(argv[i]+end, 
+				       &assoc->grp_cpu_mins, 
+				       "GrpCPUMins") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCpus", 7)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->grp_cpus,
+			    "GrpCpus") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpJobs", 4)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->grp_jobs,
+			    "GrpJobs") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpNodes", 4)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->grp_nodes,
+			    "GrpNodes") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpSubmitJobs", 4)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->grp_submit_jobs,
+			    "GrpSubmitJobs") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpWall", 4)) {
+			if(!assoc)
+				continue;
+			mins = time_str2mins(argv[i]+end);
+			if (mins != NO_VAL) {
+				assoc->grp_wall	= (uint32_t) mins;
+				a_set = 1;
+			} else {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpWall time format: %s\n", 
+					argv[i]);
+			}
+		} else if (!strncasecmp (argv[i], "MaxCPUMins", 7)) {
+			if(!assoc)
+				continue;
+			if (get_uint64(argv[i]+end, 
+				       &assoc->max_cpu_mins_pj, 
+				       "MaxCPUMins") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCpus", 7)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->max_cpus_pj,
+			    "MaxCpus") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
+			if(!assoc)
+				continue;
 			if (get_uint(argv[i]+end, &assoc->max_jobs,
-				     "MaxJobs") == SLURM_SUCCESS)
+			    "MaxJobs") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
-			if (get_uint(argv[i]+end, &assoc->max_nodes_per_job,
-				     "MaxNodes") == SLURM_SUCCESS)
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, 
+			    &assoc->max_nodes_pj,
+			    "MaxNodes") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobs", 4)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->max_submit_jobs,
+			    "MaxSubmitJobs") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
+			if(!assoc)
+				continue;
 			mins = time_str2mins(argv[i]+end);
 			if (mins != NO_VAL) {
-				assoc->max_wall_duration_per_job 
-					= (uint32_t) mins;
+				assoc->max_wall_pj = (uint32_t) mins;
 				a_set = 1;
 			} else {
 				exit_code=1;
@@ -213,27 +407,32 @@ static int _set_rec(int *start, int argc, char *argv[],
 			acct->organization = strip_quotes(argv[i]+end, NULL);
 			u_set = 1;
 		} else if (!strncasecmp (argv[i], "Parent", 1)) {
+			if(!assoc)
+				continue;
 			assoc->parent_acct = strip_quotes(argv[i]+end, NULL);
 			a_set = 1;
-		} else if (!strncasecmp (argv[i], "QosLevel=", 1)) {
+		} else if (!strncasecmp (argv[i], "QosLevel", 1)) {
 			int option = 0;
-			if(!acct->qos_list) {
-				acct->qos_list = 
+			if(!assoc)
+				continue;
+			if(!assoc->qos_list) 
+				assoc->qos_list = 
 					list_create(slurm_destroy_char);
-			}
-			
-			if(!qos_list) {
+						
+			if(!qos_list) 
 				qos_list = acct_storage_g_get_qos(
 					db_conn, my_uid, NULL);
-			}
+						
 			if(end > 2 && argv[i][end-1] == '='
 			   && (argv[i][end-2] == '+' 
 			       || argv[i][end-2] == '-'))
 				option = (int)argv[i][end-2];
 
-			addto_qos_char_list(acct->qos_list, qos_list,
-					    argv[i]+end, option);
-			u_set = 1;
+			if(addto_qos_char_list(assoc->qos_list,
+					       qos_list, argv[i]+end, option))
+				a_set = 1;
+			else
+				exit_code = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown option: %s\n"
@@ -241,17 +440,19 @@ static int _set_rec(int *start, int argc, char *argv[],
 				argv[i]);
 		}
 	}
-	if(qos_list)
-		list_destroy(qos_list);
 
 	(*start) = i;
 
+	if(qos_list)
+		list_destroy(qos_list);
+
 	if(u_set && a_set)
 		return 3;
 	else if(a_set)
 		return 2;
 	else if(u_set)
 		return 1;
+
 	return 0;
 }
 
@@ -290,156 +491,87 @@ static int _isdefault(List acct_list)
 extern int sacctmgr_add_account(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
-	int i=0, mins;
+	int i=0;
 	ListIterator itr = NULL, itr_c = NULL;
 	acct_account_rec_t *acct = NULL;
 	acct_association_rec_t *assoc = NULL;
 	acct_association_cond_t assoc_cond;
 	List name_list = list_create(slurm_destroy_char);
 	List cluster_list = list_create(slurm_destroy_char);
-	char *description = NULL;
-	char *organization = NULL;
-	char *parent = NULL;
 	char *cluster = NULL;
 	char *name = NULL;
-	List add_qos_list = NULL;
-	List qos_list = NULL;
 	List acct_list = NULL;
 	List assoc_list = NULL;
 	List local_assoc_list = NULL;
 	List local_account_list = NULL;
-	uint32_t fairshare = NO_VAL; 
-	uint32_t max_jobs = NO_VAL;
-	uint32_t max_nodes_per_job = NO_VAL;
-	uint32_t max_wall_duration_per_job = NO_VAL;
-	uint32_t max_cpu_secs_per_job = NO_VAL;
 	char *acct_str = NULL;
 	char *assoc_str = NULL;
 	int limit_set = 0;
+	acct_account_rec_t *start_acct = xmalloc(sizeof(acct_account_rec_t));
+	acct_association_rec_t *start_assoc =
+		xmalloc(sizeof(acct_association_rec_t));
 	
-	for (i=0; i<argc; i++) {
-		int end = parse_option_end(argv[i]);
-		if(!end) {
-			slurm_addto_char_list(name_list, argv[i]+end);
-		} else if (!strncasecmp (argv[i], "Cluster", 1)) {
-			slurm_addto_char_list(cluster_list, argv[i]+end);
-		} else if (!strncasecmp (argv[i], "Description", 1)) {
-			description = strip_quotes(argv[i]+end, NULL);
-		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
-			if (get_uint(argv[i]+end, &fairshare, 
-			    "FairShare") == SLURM_SUCCESS)
-				limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxCPUSecs", 4)) {
-			if (get_uint(argv[i]+end, &max_cpu_secs_per_job, 
-			    "MaxCPUSecs") == SLURM_SUCCESS)
-				limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
-			if (get_uint(argv[i]+end, &max_jobs, 
-			    "MaxJobs") == SLURM_SUCCESS)
-				limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
-			if (get_uint(argv[i]+end, &max_nodes_per_job, 
-			    "MaxNodes") == SLURM_SUCCESS)
-				limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
-			mins = time_str2mins(argv[i]+end);
-			if (mins != NO_VAL) {
-				max_wall_duration_per_job = (uint32_t) mins;
-				limit_set = 1;
-			} else {
-				exit_code=1;
-				fprintf(stderr, 
-					" Bad MaxWall time format: %s\n", 
-					argv[i]);
-			}
-		} else if (!strncasecmp (argv[i], "Names", 1)) {
-			slurm_addto_char_list(name_list, argv[i]+end);
-		} else if (!strncasecmp (argv[i], "Organization", 1)) {
-			organization = strip_quotes(argv[i]+end, NULL);
-		} else if (!strncasecmp (argv[i], "Parent", 1)) {
-			parent = strip_quotes(argv[i]+end, NULL);
-		} else if (!strncasecmp (argv[i], "QosLevel", 1)) {
-			int option = 0;
-			if(!add_qos_list) {
-				add_qos_list = 
-					list_create(slurm_destroy_char);
-			}
-			
-			if(!qos_list) {
-				qos_list = acct_storage_g_get_qos(
-					db_conn, my_uid, NULL);
-			}
-			addto_qos_char_list(add_qos_list, qos_list,
-					    argv[i]+end, option);
-		} else {
-			exit_code=1;
-			fprintf(stderr, " Unknown option: %s\n", argv[i]);
-		}		
-	}
+	init_acct_association_rec(start_assoc);
+
+	for (i=0; i<argc; i++) 
+		limit_set = _set_rec(&i, argc, argv, name_list, cluster_list,
+				     start_acct, start_assoc);
+
+	if(exit_code) 
+		return SLURM_ERROR;
 
-	if(!list_count(name_list)) {
+	if(!name_list || !list_count(name_list)) {
 		list_destroy(name_list);
 		list_destroy(cluster_list);
-		xfree(parent);
-		xfree(description);
-		xfree(organization);
+		destroy_acct_association_rec(start_assoc);
+		destroy_acct_account_rec(start_acct);
 		exit_code=1;
 		fprintf(stderr, " Need name of account to add.\n"); 
 		return SLURM_SUCCESS;
 	} else {
 		acct_account_cond_t account_cond;
-		acct_association_cond_t assoc_cond;
-
 		memset(&account_cond, 0, sizeof(acct_account_cond_t));
 		memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
+
 		assoc_cond.acct_list = name_list;
 		account_cond.assoc_cond = &assoc_cond;
 
 		local_account_list = acct_storage_g_get_accounts(
-			db_conn, my_uid, &account_cond);
-		
+			db_conn, my_uid, &account_cond);		
 	}
+
 	if(!local_account_list) {
 		exit_code=1;
 		fprintf(stderr, " Problem getting accounts from database.  "
 			"Contact your admin.\n");
 		list_destroy(name_list);
 		list_destroy(cluster_list);
-		xfree(parent);
-		xfree(description);
-		xfree(organization);
+		destroy_acct_association_rec(start_assoc);
+		destroy_acct_account_rec(start_acct);
 		return SLURM_ERROR;
 	}
 
-	if(!parent)
-		parent = xstrdup("root");
+	if(!start_assoc->parent_acct)
+		start_assoc->parent_acct = xstrdup("root");
 
-	if(!list_count(cluster_list)) {
-		List temp_list = NULL;
+	if(!cluster_list || !list_count(cluster_list)) {
 		acct_cluster_rec_t *cluster_rec = NULL;
-
-		temp_list = acct_storage_g_get_clusters(db_conn, my_uid, NULL);
-		if(!cluster_list) {
+		List tmp_list =
+			acct_storage_g_get_clusters(db_conn, my_uid, NULL);
+		if(!tmp_list) {
 			exit_code=1;
 			fprintf(stderr, 
 				" Problem getting clusters from database.  "
 			       "Contact your admin.\n");
 			list_destroy(name_list);
 			list_destroy(cluster_list);
+			destroy_acct_association_rec(start_assoc);
+			destroy_acct_account_rec(start_acct);
 			list_destroy(local_account_list);
-			xfree(parent);
-			xfree(description);
-			xfree(organization);
 			return SLURM_ERROR;
 		}
-
-		itr_c = list_iterator_create(temp_list);
-		while((cluster_rec = list_next(itr_c))) {
-			list_append(cluster_list, xstrdup(cluster_rec->name));
-		}
-		list_iterator_destroy(itr_c);
-
-		if(!list_count(cluster_list)) {
+		
+		if(!list_count(tmp_list)) {
 			exit_code=1;
 			fprintf(stderr, 
 				"  Can't add accounts, no cluster "
@@ -447,12 +579,22 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 				" Please contact your administrator.\n");
 			list_destroy(name_list);
 			list_destroy(cluster_list);
+			destroy_acct_association_rec(start_assoc);
+			destroy_acct_account_rec(start_acct);
 			list_destroy(local_account_list);
-			xfree(parent);
-			xfree(description);
-			xfree(organization);
 			return SLURM_ERROR; 
 		}
+		if(!cluster_list)
+			cluster_list = list_create(slurm_destroy_char);
+		else
+			list_flush(cluster_list);
+
+		itr_c = list_iterator_create(tmp_list);
+		while((cluster_rec = list_next(itr_c))) {
+			list_append(cluster_list, xstrdup(cluster_rec->name));
+		}
+		list_iterator_destroy(itr_c);
+		list_destroy(tmp_list);
 	} else {
 		List temp_list = NULL;
 		acct_cluster_cond_t cluster_cond;
@@ -486,9 +628,10 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		list_iterator_destroy(itr);
 		list_iterator_destroy(itr_c);
 		list_destroy(temp_list);
+
 		if(!list_count(cluster_list)) {
-			list_destroy(name_list);
-			list_destroy(cluster_list);
+			destroy_acct_association_rec(start_assoc);
+			destroy_acct_account_rec(start_acct);
 			list_destroy(local_account_list);
 			return SLURM_ERROR;
 		}
@@ -502,11 +645,10 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 
 	assoc_cond.acct_list = list_create(NULL);
 	itr = list_iterator_create(name_list);
-	while((name = list_next(itr))) {
+	while((name = list_next(itr))) 
 		list_append(assoc_cond.acct_list, name);
-	}
 	list_iterator_destroy(itr);
-	list_append(assoc_cond.acct_list, parent);
+	list_append(assoc_cond.acct_list, start_assoc->parent_acct);
 
 	assoc_cond.cluster_list = cluster_list;
 	local_assoc_list = acct_storage_g_get_associations(
@@ -518,10 +660,9 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		       "Contact your admin.\n");
 		list_destroy(name_list);
 		list_destroy(cluster_list);
+		destroy_acct_association_rec(start_assoc);
+		destroy_acct_account_rec(start_acct);
 		list_destroy(local_account_list);
-		xfree(parent);
-		xfree(description);
-		xfree(organization);
 		return SLURM_ERROR;
 	}
 
@@ -530,31 +671,24 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		acct = NULL;
 		if(!sacctmgr_find_account_from_list(local_account_list, name)) {
 			acct = xmalloc(sizeof(acct_account_rec_t));
-			acct->assoc_list = list_create(NULL);	
+			acct->assoc_list = 
+				list_create(destroy_acct_association_rec);	
 			acct->name = xstrdup(name);
-			if(description) 
-				acct->description = xstrdup(description);
+			if(start_acct->description) 
+				acct->description =
+					xstrdup(start_acct->description);
 			else
 				acct->description = xstrdup(name);
 
-			if(organization)
-				acct->organization = xstrdup(organization);
-			else if(strcmp(parent, "root"))
-				acct->organization = xstrdup(parent);
+			if(start_acct->organization)
+				acct->organization = 
+					xstrdup(start_acct->organization);
+			else if(strcmp(start_assoc->parent_acct, "root"))
+				acct->organization =
+					xstrdup(start_assoc->parent_acct);
 			else
 				acct->organization = xstrdup(name);
-			if(add_qos_list && list_count(add_qos_list)) {
-				char *tmp_qos = NULL;
-				ListIterator qos_itr = 
-					list_iterator_create(add_qos_list);
-				acct->qos_list = 
-					list_create(slurm_destroy_char);
-				while((tmp_qos = list_next(qos_itr))) {
-					list_append(acct->qos_list,
-						    xstrdup(tmp_qos));
-				}
-				list_iterator_destroy(qos_itr);
-			}
+
 			xstrfmtcat(acct_str, "  %s\n", name);
 			list_append(acct_list, acct);
 		}
@@ -567,28 +701,41 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 				continue;
 			}
 			if(!sacctmgr_find_account_base_assoc_from_list(
-				   local_assoc_list, parent, cluster)) {
+				   local_assoc_list, start_assoc->parent_acct,
+				   cluster)) {
 				exit_code=1;
 				fprintf(stderr, " Parent account '%s' "
-				       "doesn't exist on "
-				       "cluster %s\n"
-				       "        Contact your admin "
-				       "to add this account.\n",
-				       parent, cluster);
+					"doesn't exist on "
+					"cluster %s\n"
+					"        Contact your admin "
+					"to add this account.\n",
+					start_assoc->parent_acct, cluster);
 				continue;
 			}
 
 			assoc = xmalloc(sizeof(acct_association_rec_t));
+			init_acct_association_rec(assoc);
 			assoc->acct = xstrdup(name);
 			assoc->cluster = xstrdup(cluster);
-			assoc->parent_acct = xstrdup(parent);
-			assoc->fairshare = fairshare;
-			assoc->max_jobs = max_jobs;
-			assoc->max_nodes_per_job = max_nodes_per_job;
-			assoc->max_wall_duration_per_job =
-				max_wall_duration_per_job;
-			assoc->max_cpu_secs_per_job = 
-				max_cpu_secs_per_job;
+			assoc->parent_acct = xstrdup(start_assoc->parent_acct);
+			assoc->fairshare = start_assoc->fairshare;
+
+			assoc->grp_cpu_mins = start_assoc->grp_cpu_mins;
+			assoc->grp_cpus = start_assoc->grp_cpus;
+			assoc->grp_jobs = start_assoc->grp_jobs;
+			assoc->grp_nodes = start_assoc->grp_nodes;
+			assoc->grp_submit_jobs = start_assoc->grp_submit_jobs;
+			assoc->grp_wall = start_assoc->grp_wall;
+
+			assoc->max_cpu_mins_pj = start_assoc->max_cpu_mins_pj;
+			assoc->max_cpus_pj = start_assoc->max_cpus_pj;
+			assoc->max_jobs = start_assoc->max_jobs;
+			assoc->max_nodes_pj = start_assoc->max_nodes_pj;
+			assoc->max_submit_jobs = start_assoc->max_submit_jobs;
+			assoc->max_wall_pj = start_assoc->max_wall_pj;
+
+			assoc->qos_list = copy_char_list(start_assoc->qos_list);
+
 			if(acct) 
 				list_append(acct->assoc_list, assoc);
 			else 
@@ -605,8 +752,7 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	list_iterator_destroy(itr);
 	list_destroy(local_account_list);
 	list_destroy(local_assoc_list);
-	list_destroy(name_list);
-	list_destroy(cluster_list);
+
 
 	if(!list_count(acct_list) && !list_count(assoc_list)) {
 		printf(" Nothing new added.\n");
@@ -620,25 +766,19 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	if(acct_str) {
 		printf(" Adding Account(s)\n%s", acct_str);
 		printf(" Settings\n");
-		if(description)
-			printf("  Description     = %s\n", description);
+		if(start_acct->description)
+			printf("  Description     = %s\n", 
+			       start_acct->description);
 		else
 			printf("  Description     = %s\n", "Account Name");
 			
-		if(organization)
-			printf("  Organization    = %s\n", organization);
+		if(start_acct->organization)
+			printf("  Organization    = %s\n",
+			       start_acct->organization);
 		else
 			printf("  Organization    = %s\n",
 			       "Parent/Account Name");
 
-		if(add_qos_list) {
-			char *temp_char = get_qos_complete_str(
-				qos_list, add_qos_list);
-			if(temp_char) {		
-				printf("  Qos             = %s\n", temp_char);
-				xfree(temp_char);
-			}
-		}
 		xfree(acct_str);
 	}
 
@@ -649,35 +789,7 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 
 	if(limit_set) {
 		printf(" Settings\n");
-		if(fairshare == INFINITE)
-			printf("  Fairshare       = NONE\n");
-		else if(fairshare != NO_VAL) 
-			printf("  Fairshare       = %u\n", fairshare);
-		
-		if(max_cpu_secs_per_job == INFINITE)
-			printf("  MaxCPUSecs      = NONE\n");
-		else if(max_cpu_secs_per_job != NO_VAL) 
-			printf("  MaxCPUSecs      = %u\n",
-			       max_cpu_secs_per_job);
-		
-		if(max_jobs == INFINITE) 
-			printf("  MaxJobs         = NONE\n");
-		else if(max_jobs != NO_VAL) 
-			printf("  MaxJobs         = %u\n", max_jobs);
-		
-		if(max_nodes_per_job == INFINITE)
-			printf("  MaxNodes        = NONE\n");
-		else if(max_nodes_per_job != NO_VAL)
-			printf("  MaxNodes        = %u\n", max_nodes_per_job);
-		
-		if(max_wall_duration_per_job == INFINITE) 
-			printf("  MaxWall         = NONE\n");		
-		else if(max_wall_duration_per_job != NO_VAL) {
-			char time_buf[32];
-			mins2time_str((time_t) max_wall_duration_per_job, 
-				      time_buf, sizeof(time_buf));
-			printf("  MaxWall         = %s\n", time_buf);
-		}
+		sacctmgr_print_assoc_limits(start_assoc);
 	}
 	
 	notice_thread_init();
@@ -713,15 +825,14 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	}
 
 end_it:
-	if(add_qos_list)
-		list_destroy(add_qos_list);
+	list_destroy(name_list);
+	list_destroy(cluster_list);
 	list_destroy(acct_list);
-	list_destroy(assoc_list);
-		
-	xfree(parent);
-	xfree(description);
-	xfree(organization);
-
+	list_destroy(assoc_list);		
+	
+	destroy_acct_association_rec(start_assoc);
+	destroy_acct_account_rec(start_acct);
+	
 	return rc;
 }
 
@@ -752,10 +863,18 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 		PRINT_COORDS,
 		PRINT_DESC,
 		PRINT_FAIRSHARE,
+		PRINT_GRPCM,
+		PRINT_GRPC,
+		PRINT_GRPJ,
+		PRINT_GRPN,
+		PRINT_GRPS,
+		PRINT_GRPW,
 		PRINT_ID,
 		PRINT_MAXC,
+		PRINT_MAXCM,
 		PRINT_MAXJ,
 		PRINT_MAXN,
+		PRINT_MAXS,
 		PRINT_MAXW,
 		PRINT_ORG,
 		PRINT_QOS,
@@ -775,10 +894,11 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 		list_destroy(format_list);
 		return SLURM_ERROR;
 	} else if(!list_count(format_list)) {
-		slurm_addto_char_list(format_list, "A,D,O,Q");
+		slurm_addto_char_list(format_list, "A,D,O");
 		if(acct_cond->with_assocs)
 			slurm_addto_char_list(format_list,
-					"Cl,ParentN,U,F,MaxC,MaxJ,MaxN,MaxW");
+					      "Cl,ParentN,U,F,GrpJ,GrpN,GrpS,"
+					      "MaxJ,MaxN,MaxS,MaxW,QOS");
 			
 		if(acct_cond->with_coords)
 			slurm_addto_char_list(format_list, "Coord");
@@ -800,6 +920,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
 		if(!strncasecmp("Account", object, 1)
 		   || !strncasecmp("Name", object, 2)) {
@@ -827,15 +948,50 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 			field->name = xstrdup("FairShare");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpCPUMins", object, 8)) {
+			field->type = PRINT_GRPCM;
+			field->name = xstrdup("GrpCPUMins");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("GrpCPUs", object, 8)) {
+			field->type = PRINT_GRPC;
+			field->name = xstrdup("GrpCPUs");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpJobs", object, 4)) {
+			field->type = PRINT_GRPJ;
+			field->name = xstrdup("GrpJobs");
+			field->len = 7;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpNodes", object, 4)) {
+			field->type = PRINT_GRPN;
+			field->name = xstrdup("GrpNodes");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpSubmitJobs", object, 4)) {
+			field->type = PRINT_GRPS;
+			field->name = xstrdup("GrpSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpWall", object, 4)) {
+			field->type = PRINT_GRPW;
+			field->name = xstrdup("GrpWall");
+			field->len = 11;
+			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("ID", object, 1)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
 			field->len = 6;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("MaxCPUSecs", object, 4)) {
-			field->type = PRINT_MAXC;
-			field->name = xstrdup("MaxCPUSecs");
+		} else if(!strncasecmp("MaxCPUMins", object, 7)) {
+			field->type = PRINT_MAXCM;
+			field->name = xstrdup("MaxCPUMins");
 			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("MaxCPUs", object, 7)) {
+			field->type = PRINT_MAXC;
+			field->name = xstrdup("MaxCPUs");
+			field->len = 8;
 			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
@@ -847,6 +1003,11 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxSubmitJobs", object, 4)) {
+			field->type = PRINT_MAXS;
+			field->name = xstrdup("MaxSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
@@ -888,6 +1049,11 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
@@ -956,16 +1122,66 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 							(curr_inx == 
 							 field_count));
 						break;
+					case PRINT_GRPCM:
+						field->print_routine(
+							field,
+							assoc->grp_cpu_mins,
+							(curr_inx == 
+							 field_count));
+						break;
+					case PRINT_GRPC:
+						field->print_routine(
+							field,
+							assoc->grp_cpus,
+							(curr_inx == 
+							 field_count));
+						break;
+					case PRINT_GRPJ:
+						field->print_routine(
+							field, 
+							assoc->grp_jobs,
+							(curr_inx
+							 == field_count));
+						break;
+					case PRINT_GRPN:
+						field->print_routine(
+							field,
+							assoc->grp_nodes,
+							(curr_inx
+							 == field_count));
+						break;
+					case PRINT_GRPS:
+						field->print_routine(
+							field, 
+						assoc->grp_submit_jobs,
+							(curr_inx
+							 == field_count));
+						break;
+					case PRINT_GRPW:
+						field->print_routine(
+							field,
+							assoc->grp_wall,
+							(curr_inx
+							 == field_count));
+						break;
 					case PRINT_ID:
 						field->print_routine(
 							field, assoc->id,
 							(curr_inx == 
 							 field_count));
 						break;
+					case PRINT_MAXCM:
+						field->print_routine(
+							field,
+							assoc->
+							max_cpu_mins_pj,
+							(curr_inx == 
+							 field_count));
+						break;
 					case PRINT_MAXC:
 						field->print_routine(
-							field, assoc->
-							max_cpu_secs_per_job,
+							field,
+							assoc->max_cpus_pj,
 							(curr_inx == 
 							 field_count));
 						break;
@@ -978,15 +1194,22 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 					case PRINT_MAXN:
 						field->print_routine(
 							field, assoc->
-							max_nodes_per_job,
+							max_nodes_pj,
 							(curr_inx == 
 							 field_count));
 						break;
+					case PRINT_MAXS:
+						field->print_routine(
+							field, 
+							assoc->max_submit_jobs,
+							(curr_inx ==
+							 field_count));
+						break;
 					case PRINT_MAXW:
 						field->print_routine(
 							field, 
 							assoc->
-							max_wall_duration_per_job,
+							max_wall_pj,
 							(curr_inx == 
 							 field_count));
 						break;
@@ -1006,25 +1229,17 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 									NULL);
 						}
 						field->print_routine(
-							field, 
+							field,
 							qos_list,
-							acct->qos_list,
+							assoc->qos_list,
 							(curr_inx == 
 							 field_count));
 						break;
 					case PRINT_QOS_RAW:
-						if(!qos_list) {
-							qos_list = 
-								acct_storage_g_get_qos(
-									db_conn,
-									my_uid,
-									NULL);
-						}
 						field->print_routine(
 							field,
-							qos_list,
-							acct->qos_list,
-							(curr_inx == 
+							assoc->qos_list,
+							(curr_inx ==
 							 field_count));
 						break;
 					case PRINT_PID:
@@ -1055,6 +1270,10 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 							 field_count));
 						break;
 					default:
+						field->print_routine(
+							field, NULL,
+							(curr_inx == 
+							 field_count));
 						break;
 					}
 					curr_inx++;
@@ -1067,126 +1286,67 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 			int curr_inx = 1;
 			while((field = list_next(itr2))) {
 				switch(field->type) {
-				case PRINT_ACCOUNT:
-					field->print_routine(
-						field, acct->name,
-							(curr_inx == 
-							 field_count));
-					break;
+					/* All the association stuff */
 				case PRINT_CLUSTER:
-					field->print_routine(
-						field, NULL,
-							(curr_inx == 
-							 field_count));
-					break;
-				case PRINT_COORDS:
-					field->print_routine(
-						field,
-						acct->coordinators,
-							(curr_inx == 
-							 field_count));
-					break;
-				case PRINT_DESC:
-					field->print_routine(
-						field, acct->description,
-							(curr_inx == 
-							 field_count));
-					break;
 				case PRINT_FAIRSHARE:
-					field->print_routine(
-						field, NULL,
-							(curr_inx == 
-							 field_count));
-					break;
+				case PRINT_GRPCM:
+				case PRINT_GRPC:
+				case PRINT_GRPJ:
+				case PRINT_GRPN:
+				case PRINT_GRPS:
+				case PRINT_GRPW:
 				case PRINT_ID:
-					field->print_routine(
-						field, NULL,
-							(curr_inx == 
-							 field_count));
-					break;
+				case PRINT_MAXCM:
 				case PRINT_MAXC:
-					field->print_routine(
-						field, NULL,
-							(curr_inx == 
-							 field_count));
-					break;
 				case PRINT_MAXJ:
-					field->print_routine(
-						field, NULL,
-							(curr_inx == 
-							 field_count));
-					break;
 				case PRINT_MAXN:
-					field->print_routine(
-						field, NULL,
-							(curr_inx == 
-							 field_count));
-					break;
+				case PRINT_MAXS:
 				case PRINT_MAXW:
+				case PRINT_QOS_RAW:
+				case PRINT_PID:
+				case PRINT_PNAME:
+				case PRINT_PART:
+				case PRINT_USER:
 					field->print_routine(
 						field, NULL,
 							(curr_inx == 
 							 field_count));
 					break;
-				case PRINT_ORG:
-					field->print_routine(
-						field, acct->organization,
-							(curr_inx == 
-							 field_count));
-					break;
 				case PRINT_QOS:
-					if(!qos_list) {
-						qos_list = 
-							acct_storage_g_get_qos(
-								db_conn,
-								my_uid,
-								NULL);
-					}
 					field->print_routine(
-						field, qos_list,
-						acct->qos_list,
-							(curr_inx == 
-							 field_count));
+						field, NULL,
+						NULL,
+						(curr_inx == field_count));
 					break;
-				case PRINT_QOS_RAW:
-					if(!qos_list) {
-						qos_list = 
-							acct_storage_g_get_qos(
-								db_conn,
-								my_uid,
-								NULL);
-					}
+				case PRINT_ACCOUNT:
 					field->print_routine(
-						field, qos_list,
-						acct->qos_list,
+						field, acct->name,
 							(curr_inx == 
 							 field_count));
 					break;
-				case PRINT_PID:
+				case PRINT_COORDS:
 					field->print_routine(
-						field, NULL,
+						field,
+						acct->coordinators,
 							(curr_inx == 
 							 field_count));
 					break;
-				case PRINT_PNAME:
+				case PRINT_DESC:
 					field->print_routine(
-						field, NULL,
+						field, acct->description,
 							(curr_inx == 
 							 field_count));
 					break;
-				case PRINT_PART:
+				case PRINT_ORG:
 					field->print_routine(
-						field, NULL,
+						field, acct->organization,
 							(curr_inx == 
 							 field_count));
 					break;
-				case PRINT_USER:
+				default:
 					field->print_routine(
 						field, NULL,
-							(curr_inx == 
-							 field_count));
-					break;
-				default:
+						(curr_inx == field_count));
 					break;
 				}
 				curr_inx++;
@@ -1216,11 +1376,7 @@ extern int sacctmgr_modify_account(int argc, char *argv[])
 	int cond_set = 0, rec_set = 0, set = 0;
 	List ret_list = NULL;
 
-	assoc->fairshare = NO_VAL;
-	assoc->max_cpu_secs_per_job = NO_VAL;
-	assoc->max_jobs = NO_VAL;
-	assoc->max_nodes_per_job = NO_VAL;
-	assoc->max_wall_duration_per_job = NO_VAL;
+	init_acct_association_rec(assoc);
 
 	for (i=0; i<argc; i++) {
 		if (!strncasecmp (argv[i], "Where", 5)) {
@@ -1228,13 +1384,19 @@ extern int sacctmgr_modify_account(int argc, char *argv[])
 			cond_set = _set_cond(&i, argc, argv, acct_cond, NULL);
 		} else if (!strncasecmp (argv[i], "Set", 3)) {
 			i++;
-			rec_set = _set_rec(&i, argc, argv, acct, assoc);
+			rec_set = _set_rec(&i, argc, argv, NULL, NULL, 
+					   acct, assoc);
 		} else {
 			cond_set = _set_cond(&i, argc, argv, acct_cond, NULL);
 		}
 	}
 
-	if(!rec_set) {
+	if(exit_code) {
+		destroy_acct_account_cond(acct_cond);
+		destroy_acct_account_rec(acct);
+		destroy_acct_association_rec(assoc);
+		return SLURM_ERROR;
+	} else if(!rec_set) {
 		exit_code=1;
 		fprintf(stderr, " You didn't give me anything to set\n");
 		destroy_acct_account_cond(acct_cond);
@@ -1357,6 +1519,10 @@ extern int sacctmgr_delete_account(int argc, char *argv[])
 		return SLURM_ERROR;
 	}
 
+	if(exit_code) {
+		destroy_acct_account_cond(acct_cond);
+		return SLURM_ERROR;
+	}
 	/* check to see if person is trying to remove root account.  This is
 	 * bad, and should not be allowed outside of deleting a cluster.
 	 */
diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c
index 08cf98a07..fcbedb90e 100644
--- a/src/sacctmgr/association_functions.c
+++ b/src/sacctmgr/association_functions.c
@@ -39,149 +39,204 @@
 #include "src/sacctmgr/sacctmgr.h"
 bool tree_display = 0;
 
-typedef struct {
-	char *name;
-	char *print_name;
-	char *spaces;
-} print_acct_t;
-
-static void _destroy_print_acct(void *object)
-{
-	print_acct_t *print_acct = (print_acct_t *)object;
-	if(print_acct) {
-		xfree(print_acct->name);
-		xfree(print_acct->print_name);
-		xfree(print_acct->spaces);
-		xfree(print_acct);
-	}
-}
-
-static char *_get_print_acct_name(char *name, char *parent, char *cluster, 
-				  List tree_list)
-{
-	ListIterator itr = NULL;
-	print_acct_t *print_acct = NULL;
-	print_acct_t *par_print_acct = NULL;
-	static char *ret_name = NULL;
-	static char *last_name = NULL, *last_cluster = NULL;
-
-
-	if(!tree_list) {
-		return NULL;
-	}
-	
-	itr = list_iterator_create(tree_list);
-	while((print_acct = list_next(itr))) {
-		if(!strcmp(name, print_acct->name)) {
-			ret_name = print_acct->print_name;
-			break;
-		} else if(parent && !strcmp(parent, print_acct->name)) {
-			par_print_acct = print_acct;
-		}
-	}
-	list_iterator_destroy(itr);
-	
-	if(parent && print_acct) {
-		return ret_name;
-	} 
-
-	print_acct = xmalloc(sizeof(print_acct_t));
-	print_acct->name = xstrdup(name);
-	if(par_print_acct) {
-		print_acct->spaces =
-			xstrdup_printf(" %s", par_print_acct->spaces);
-	} else {
-		print_acct->spaces = xstrdup("");
-	}
-
-	/* user account */
-	if(name[0] == '|')
-		print_acct->print_name = xstrdup_printf("%s%s", 
-							print_acct->spaces, 
-							parent);	
-	else
-		print_acct->print_name = xstrdup_printf("%s%s", 
-							print_acct->spaces, 
-							name);	
-	
-
-	list_append(tree_list, print_acct);
-
-	ret_name = print_acct->print_name;
-	last_name = name;
-	last_cluster = cluster;
-
-	return print_acct->print_name;
-}
-
 static int _set_cond(int *start, int argc, char *argv[],
-		     acct_association_cond_t *association_cond,
+		     acct_association_cond_t *assoc_cond,
 		     List format_list)
 {
 	int i, end = 0;
 	int set = 0;
+	List qos_list = NULL;
 
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!end && !strncasecmp (argv[i], "Tree", 4)) {
 			tree_display = 1;
 		} else if (!end && !strncasecmp (argv[i], "WithDeleted", 5)) {
-			association_cond->with_deleted = 1;
+			assoc_cond->with_deleted = 1;
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithRawQOS", 5)) {
+			assoc_cond->with_raw_qos = 1;
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithSubAccounts", 5)) {
+			assoc_cond->with_sub_accts = 1;
 		} else if (!end && !strncasecmp (argv[i], "WOPInfo", 4)) {
-			association_cond->without_parent_info = 1;
+			assoc_cond->without_parent_info = 1;
 		} else if (!end && !strncasecmp (argv[i], "WOPLimits", 4)) {
-			association_cond->without_parent_limits = 1;
+			assoc_cond->without_parent_limits = 1;
 		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
 			continue;
 		} else if(!end || !strncasecmp (argv[i], "Id", 1)
 			  || !strncasecmp (argv[i], "Associations", 2)) {
-			if(!association_cond->id_list)
-				association_cond->id_list = 
+			if(!assoc_cond->id_list)
+				assoc_cond->id_list = 
 					list_create(slurm_destroy_char);
-			slurm_addto_char_list(association_cond->id_list,
+			slurm_addto_char_list(assoc_cond->id_list,
 					      argv[i]+end);
 			set = 1;
-		} else if (!strncasecmp (argv[i], "Users", 1)) {
-			if(!association_cond->user_list)
-				association_cond->user_list = 
-					list_create(slurm_destroy_char);
-			slurm_addto_char_list(association_cond->user_list,
-					argv[i]+end);
-			set = 1;
 		} else if (!strncasecmp (argv[i], "Accounts", 2)) {
-			if(!association_cond->acct_list)
-				association_cond->acct_list = 
+			if(!assoc_cond->acct_list)
+				assoc_cond->acct_list = 
 					list_create(slurm_destroy_char);
-			slurm_addto_char_list(association_cond->acct_list,
+			slurm_addto_char_list(assoc_cond->acct_list,
 					argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Clusters", 1)) {
-			if(!association_cond->cluster_list)
-				association_cond->cluster_list = 
+			if(!assoc_cond->cluster_list)
+				assoc_cond->cluster_list = 
 					list_create(slurm_destroy_char);
-			slurm_addto_char_list(association_cond->cluster_list,
+			slurm_addto_char_list(assoc_cond->cluster_list,
 					argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format", 1)) {
 			if(format_list)
-				slurm_addto_char_list(format_list, argv[i]+end);
+				slurm_addto_char_list(format_list,
+						      argv[i]+end);
+		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
+			if(!assoc_cond->fairshare_list)
+				assoc_cond->fairshare_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->fairshare_list,
+					argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCPUMins", 7)) {
+			if(!assoc_cond->grp_cpu_mins_list)
+				assoc_cond->grp_cpu_mins_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_cpu_mins_list,
+					argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCpus", 7)) {
+			if(!assoc_cond->grp_cpus_list)
+				assoc_cond->grp_cpus_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_cpus_list,
+					argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpJobs", 4)) {
+			if(!assoc_cond->grp_jobs_list)
+				assoc_cond->grp_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_jobs_list,
+					argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpNodes", 4)) {
+			if(!assoc_cond->grp_nodes_list)
+				assoc_cond->grp_nodes_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_nodes_list,
+					argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpSubmitJobs", 4)) {
+			if(!assoc_cond->grp_submit_jobs_list)
+				assoc_cond->grp_submit_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->grp_submit_jobs_list,
+				   argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpWall", 4)) {
+			if(!assoc_cond->grp_wall_list)
+				assoc_cond->grp_wall_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->grp_wall_list,
+				   argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCPUMins", 7)) {
+			if(!assoc_cond->max_cpu_mins_pj_list)
+				assoc_cond->max_cpu_mins_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_cpu_mins_pj_list,
+				   argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCpus", 7)) {
+			if(!assoc_cond->max_cpus_pj_list)
+				assoc_cond->max_cpus_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_cpus_pj_list,
+				   argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
+			if(!assoc_cond->max_jobs_list)
+				assoc_cond->max_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_jobs_list,
+				   argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
+			if(!assoc_cond->max_nodes_pj_list)
+				assoc_cond->max_nodes_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_nodes_pj_list,
+				   argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobs", 4)) {
+			if(!assoc_cond->max_submit_jobs_list)
+				assoc_cond->max_submit_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_submit_jobs_list,
+				   argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
+			if(!assoc_cond->max_wall_pj_list)
+				assoc_cond->max_wall_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_wall_pj_list,
+				   argv[i]+end))
+				set = 1;
 		} else if (!strncasecmp (argv[i], "Partitions", 4)) {
-			if(!association_cond->partition_list)
-				association_cond->partition_list = 
+			if(!assoc_cond->partition_list)
+				assoc_cond->partition_list = 
 					list_create(slurm_destroy_char);
-			slurm_addto_char_list(association_cond->partition_list,
+			slurm_addto_char_list(assoc_cond->partition_list,
 					argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Parent", 4)) {
-			xfree(association_cond->parent_acct);
-			association_cond->parent_acct =
-				strip_quotes(argv[i]+end, NULL);
+			if(!assoc_cond->parent_acct_list) {
+				assoc_cond->parent_acct_list = 
+					list_create(slurm_destroy_char);
+			}
+			if(slurm_addto_char_list(assoc_cond->parent_acct_list,
+						 argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp (argv[i], "QosLevel", 1)) {
+			int option = 0;
+			if(!assoc_cond->qos_list) {
+				assoc_cond->qos_list = 
+					list_create(slurm_destroy_char);
+			}
+			
+			if(!qos_list) {
+				qos_list = acct_storage_g_get_qos(
+					db_conn, my_uid, NULL);
+			}
+			
+			if(addto_qos_char_list(assoc_cond->qos_list, qos_list,
+					       argv[i]+end, option))
+				set = 1;
+			else
+				exit_code = 1;
+		} else if (!strncasecmp (argv[i], "Users", 1)) {
+			if(!assoc_cond->user_list)
+				assoc_cond->user_list = 
+					list_create(slurm_destroy_char);
+			slurm_addto_char_list(assoc_cond->user_list,
+					argv[i]+end);
 			set = 1;
 		} else {
 			exit_code = 1;
 			fprintf(stderr, " Unknown condition: %s\n", argv[i]);
 		}
 	}
+	if(qos_list)
+		list_destroy(qos_list);
+
 	(*start) = i;
 
 	return set;
@@ -348,6 +403,9 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 	char *object = NULL;
 	char *print_acct = NULL, *last_cluster = NULL;
 	List tree_list = NULL;
+	List qos_list = NULL;
+
+	int field_count = 0;
 
 	print_field_t *field = NULL;
 
@@ -358,15 +416,25 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 		PRINT_ACCOUNT,
 		PRINT_CLUSTER,
 		PRINT_FAIRSHARE,
+		PRINT_GRPCM,
+		PRINT_GRPC,
+		PRINT_GRPJ,
+		PRINT_GRPN,
+		PRINT_GRPS,
+		PRINT_GRPW,
 		PRINT_ID,
 		PRINT_LFT,
 		PRINT_MAXC,
+		PRINT_MAXCM,
 		PRINT_MAXJ,
 		PRINT_MAXN,
+		PRINT_MAXS,
 		PRINT_MAXW,
 		PRINT_PID,
 		PRINT_PNAME,
 		PRINT_PART,
+		PRINT_QOS,
+		PRINT_QOS_RAW,
 		PRINT_RGT,
 		PRINT_USER
 	};
@@ -379,12 +447,14 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 		return SLURM_ERROR;
 	} else if(!list_count(format_list)) 
 		slurm_addto_char_list(format_list,
-				      "C,A,U,Part,F,MaxC,MaxJ,MaxN,MaxW");
+				      "C,A,U,Part,F,GrpJ,GrpN,GrpS,"
+				      "MaxJ,MaxN,MaxS,MaxW,QOS");
 
 	print_fields_list = list_create(destroy_print_field);
 
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
 		if(!strncasecmp("Account", object, 1)) {
 			field->type = PRINT_ACCOUNT;
@@ -404,6 +474,36 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			field->name = xstrdup("FairShare");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpCPUMins", object, 8)) {
+			field->type = PRINT_GRPCM;
+			field->name = xstrdup("GrpCPUMins");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("GrpCPUs", object, 8)) {
+			field->type = PRINT_GRPC;
+			field->name = xstrdup("GrpCPUs");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpJobs", object, 4)) {
+			field->type = PRINT_GRPJ;
+			field->name = xstrdup("GrpJobs");
+			field->len = 7;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpNodes", object, 4)) {
+			field->type = PRINT_GRPN;
+			field->name = xstrdup("GrpNodes");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpSubmitJobs", object, 4)) {
+			field->type = PRINT_GRPS;
+			field->name = xstrdup("GrpSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpWall", object, 4)) {
+			field->type = PRINT_GRPW;
+			field->name = xstrdup("GrpWall");
+			field->len = 11;
+			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("ID", object, 1)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
@@ -414,11 +514,15 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			field->name = xstrdup("LFT");
 			field->len = 6;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("MaxCPUSecs", object, 4)
-			  || !strncasecmp("MaxProcSecsPerJob", object, 4)) {
-			field->type = PRINT_MAXC;
-			field->name = xstrdup("MaxCPUSecs");
+		} else if(!strncasecmp("MaxCPUMins", object, 7)) {
+			field->type = PRINT_MAXCM;
+			field->name = xstrdup("MaxCPUMins");
 			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("MaxCPUs", object, 7)) {
+			field->type = PRINT_MAXC;
+			field->name = xstrdup("MaxCPUs");
+			field->len = 8;
 			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
@@ -430,11 +534,26 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxSubmitJobs", object, 4)) {
+			field->type = PRINT_MAXS;
+			field->name = xstrdup("MaxSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
 			field->print_routine = print_fields_time;
+		} else if(!strncasecmp("QOSRAW", object, 4)) {
+			field->type = PRINT_QOS_RAW;
+			field->name = xstrdup("QOS_RAW");
+			field->len = 10;
+			field->print_routine = print_fields_char_list;
+		} else if(!strncasecmp("QOS", object, 1)) {
+			field->type = PRINT_QOS;
+			field->name = xstrdup("QOS");
+			field->len = 20;
+			field->print_routine = sacctmgr_print_qos_list;
 		} else if(!strncasecmp("ParentID", object, 7)) {
 			field->type = PRINT_PID;
 			field->name = xstrdup("Par ID");
@@ -467,6 +586,11 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
@@ -495,12 +619,16 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
+	field_count = list_count(print_fields_list);
+
 	while((assoc = list_next(itr))) {
+		int curr_inx = 1;
 		if(!last_cluster || strcmp(last_cluster, assoc->cluster)) {
 			if(tree_list) {
 				list_flush(tree_list);
 			} else {
-				tree_list = list_create(_destroy_print_acct);
+				tree_list = 
+					list_create(destroy_acct_print_tree);
 			}
 			last_cluster = assoc->cluster;
 		} 
@@ -520,7 +648,7 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 						parent_acct = 
 							assoc->parent_acct;
 					}
-					print_acct = _get_print_acct_name(
+					print_acct = get_tree_acct_name(
 						local_acct,
 						parent_acct,
 						assoc->cluster, tree_list);
@@ -528,71 +656,152 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 				} else {
 					print_acct = assoc->acct;
 				}
-				field->print_routine(field, 
-						     print_acct);
+				field->print_routine(
+					field, 
+					print_acct,
+					(curr_inx == field_count));
 				break;
 			case PRINT_CLUSTER:
-				field->print_routine(field,
-						     assoc->cluster);
+				field->print_routine(
+					field,
+					assoc->cluster,
+					(curr_inx == field_count));
 				break;
 			case PRINT_FAIRSHARE:
+				field->print_routine(
+					field,
+					assoc->fairshare,
+					(curr_inx == field_count));
+				break;
+			case PRINT_GRPCM:
+				field->print_routine(
+					field,
+					assoc->grp_cpu_mins,
+					(curr_inx == field_count));
+				break;
+			case PRINT_GRPC:
 				field->print_routine(field,
-						     assoc->fairshare);
+						     assoc->grp_cpus,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPJ:
+				field->print_routine(field, 
+						     assoc->grp_jobs,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPN:
+				field->print_routine(field,
+						     assoc->grp_nodes,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPS:
+				field->print_routine(field, 
+						     assoc->grp_submit_jobs,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPW:
+				field->print_routine(
+					field,
+					assoc->grp_wall,
+					(curr_inx == field_count));
 				break;
 			case PRINT_ID:
 				field->print_routine(field, 
-						     assoc->id);
+						     assoc->id,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_LFT:
 				field->print_routine(field, 
-						     assoc->lft);
+						     assoc->lft,
+						     (curr_inx == field_count));
 				break;
-			case PRINT_MAXC:
+			case PRINT_MAXCM:
 				field->print_routine(
 					field,
-					assoc->max_cpu_secs_per_job);
+					assoc->max_cpu_mins_pj,
+					(curr_inx == field_count));
+				break;
+			case PRINT_MAXC:
+				field->print_routine(field,
+						     assoc->max_cpus_pj,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_MAXJ:
 				field->print_routine(field, 
-						     assoc->max_jobs);
+						     assoc->max_jobs,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_MAXN:
 				field->print_routine(field,
-						     assoc->max_nodes_per_job);
+						     assoc->max_nodes_pj,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_MAXS:
+				field->print_routine(field, 
+						     assoc->max_submit_jobs,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_MAXW:
 				field->print_routine(
 					field,
-					assoc->max_wall_duration_per_job);
+					assoc->max_wall_pj,
+					(curr_inx == field_count));
 				break;
 			case PRINT_PID:
 				field->print_routine(field,
-						     assoc->parent_id);
+						     assoc->parent_id,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_PNAME:
 				field->print_routine(field,
-						     assoc->parent_acct);
+						     assoc->parent_acct,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_PART:
 				field->print_routine(field,
-						     assoc->partition);
+						     assoc->partition,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_QOS:
+				if(!qos_list) 
+					qos_list = acct_storage_g_get_qos(
+						db_conn, my_uid, NULL);
+				
+				field->print_routine(field,
+						     qos_list,
+						     assoc->qos_list,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_QOS_RAW:
+				field->print_routine(field,
+						     assoc->qos_list,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_RGT:
 				field->print_routine(field, 
-						     assoc->rgt);
+						     assoc->rgt,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_USER:
 				field->print_routine(field, 
-						     assoc->user);
+						     assoc->user,
+						     (curr_inx == field_count));
 				break;
 			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
 				break;
 			}
+			curr_inx++;
 		}
 		list_iterator_reset(itr2);
 		printf("\n");
 	}
 
+	if(qos_list)
+		list_destroy(qos_list);
+
 	if(tree_list) 
 		list_destroy(tree_list);
 			
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index 2d4efcf4e..74a99cdae 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -78,11 +78,13 @@ static int _set_cond(int *start, int argc, char *argv[],
 }
 
 static int _set_rec(int *start, int argc, char *argv[],
+		    List name_list,
 		    acct_association_rec_t *assoc)
 {
 	int i, mins;
 	int set = 0;
 	int end = 0;
+	List qos_list = NULL;
 
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
@@ -91,29 +93,73 @@ static int _set_rec(int *start, int argc, char *argv[],
 			break;
 		} else if(!end && !strncasecmp(argv[i], "set", 3)) {
 			continue;
-		} else if(!end) {
-			exit_code=1;
-			fprintf(stderr, 
-				" Bad format on %s: End your option with "
-			       "an '=' sign\n", argv[i]);			
+		} else if(!end
+			  || !strncasecmp (argv[i], "Names", 1) 
+			  || !strncasecmp (argv[i], "Clusters", 1)) {
+			if(name_list)
+				slurm_addto_char_list(name_list, argv[i]+end);
 		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
 			if (get_uint(argv[i]+end, &assoc->fairshare, 
 			    "FairShare") == SLURM_SUCCESS)
 				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCPUMins", 7)) {
+			if (get_uint64(argv[i]+end, 
+				       &assoc->grp_cpu_mins, 
+				       "GrpCPUMins") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCpus", 7)) {
+			if (get_uint(argv[i]+end, &assoc->grp_cpus,
+			    "GrpCpus") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpJobs", 4)) {
+			if (get_uint(argv[i]+end, &assoc->grp_jobs,
+			    "GrpJobs") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpNodes", 4)) {
+			if (get_uint(argv[i]+end, &assoc->grp_nodes,
+			    "GrpNodes") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpSubmitJobs", 4)) {
+			if (get_uint(argv[i]+end, &assoc->grp_submit_jobs,
+			    "GrpSubmitJobs") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpWall", 4)) {
+			mins = time_str2mins(argv[i]+end);
+			if (mins != NO_VAL) {
+				assoc->grp_wall	= (uint32_t) mins;
+				set = 1;
+			} else {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpWall time format: %s\n", 
+					argv[i]);
+			}
+		} else if (!strncasecmp (argv[i], "MaxCPUMins", 7)) {
+			if (get_uint64(argv[i]+end, 
+				       &assoc->max_cpu_mins_pj, 
+				       "MaxCPUMins") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCpus", 7)) {
+			if (get_uint(argv[i]+end, &assoc->max_cpus_pj,
+			    "MaxCpus") == SLURM_SUCCESS)
+				set = 1;
 		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
 			if (get_uint(argv[i]+end, &assoc->max_jobs,
 			    "MaxJobs") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
 			if (get_uint(argv[i]+end, 
-			    &assoc->max_nodes_per_job,
+			    &assoc->max_nodes_pj,
 			    "MaxNodes") == SLURM_SUCCESS)
 				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobs", 4)) {
+			if (get_uint(argv[i]+end, &assoc->max_submit_jobs,
+			    "MaxSubmitJobs") == SLURM_SUCCESS)
+				set = 1;
 		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
 			mins = time_str2mins(argv[i]+end);
 			if (mins != NO_VAL) {
-				assoc->max_wall_duration_per_job
-						= (uint32_t) mins;
+				assoc->max_wall_pj = (uint32_t) mins;
 				set = 1;
 			} else {
 				exit_code=1;
@@ -121,11 +167,26 @@ static int _set_rec(int *start, int argc, char *argv[],
 					" Bad MaxWall time format: %s\n", 
 					argv[i]);
 			}
-		} else if (!strncasecmp (argv[i], "MaxCPUSecs", 4)) {
-			if (get_uint(argv[i]+end, 
-			     &assoc->max_cpu_secs_per_job, 
-			    "MaxCPUSecs") == SLURM_SUCCESS)
+		} else if (!strncasecmp (argv[i], "QosLevel", 1)) {
+			int option = 0;
+			if(!assoc->qos_list) 
+				assoc->qos_list = 
+					list_create(slurm_destroy_char);
+						
+			if(!qos_list) 
+				qos_list = acct_storage_g_get_qos(
+					db_conn, my_uid, NULL);
+						
+			if(end > 2 && argv[i][end-1] == '='
+			   && (argv[i][end-2] == '+' 
+			       || argv[i][end-2] == '-'))
+				option = (int)argv[i][end-2];
+
+			if(addto_qos_char_list(assoc->qos_list,
+					       qos_list, argv[i]+end, option))
 				set = 1;
+			else
+				exit_code = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown option: %s\n"
@@ -135,6 +196,9 @@ static int _set_rec(int *start, int argc, char *argv[],
 	}
 	(*start) = i;
 
+	if(qos_list)
+		list_destroy(qos_list);
+
 	return set;
 
 }
@@ -143,53 +207,20 @@ static int _set_rec(int *start, int argc, char *argv[],
 extern int sacctmgr_add_cluster(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
-	int i = 0, mins;
+	int i = 0;
 	acct_cluster_rec_t *cluster = NULL;
 	List name_list = list_create(slurm_destroy_char);
 	List cluster_list = NULL;
-	uint32_t fairshare = NO_VAL; 
-	uint32_t max_cpu_secs_per_job = NO_VAL;
-	uint32_t max_jobs = NO_VAL;
-	uint32_t max_nodes_per_job = NO_VAL;
-	uint32_t max_wall_duration_per_job = NO_VAL;
+	acct_association_rec_t start_assoc;
+
 	int limit_set = 0;
 	ListIterator itr = NULL, itr_c = NULL;
 	char *name = NULL;
 
-	for (i=0; i<argc; i++) {
-		int end = parse_option_end(argv[i]);
-		if(!end) {
-			slurm_addto_char_list(name_list, argv[i]+end);
-		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
-			fairshare = atoi(argv[i]+end);
-			limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxCPUSecs", 4)) {
-			max_cpu_secs_per_job = atoi(argv[i]+end);
-			limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxJobs=", 4)) {
-			max_jobs = atoi(argv[i]+end);
-			limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
-			max_nodes_per_job = atoi(argv[i]+end);
-			limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
-			mins = time_str2mins(argv[i]+end);
-			if (mins != NO_VAL) {
-				max_wall_duration_per_job = (uint32_t) mins;
-				limit_set = 1;
-			} else {
-				exit_code=1;
-				fprintf(stderr, 
-					" Bad MaxWall time format: %s\n", 
-					argv[i]);
-			}
-		} else if (!strncasecmp (argv[i], "Names", 1)) {
-			slurm_addto_char_list(name_list, argv[i]+end);
-		} else {
-			exit_code=1;
-			fprintf(stderr, " Unknown option: %s\n", argv[i]);
-		}		
-	}
+	init_acct_association_rec(&start_assoc);
+
+	for (i=0; i<argc; i++) 
+		limit_set = _set_rec(&i, argc, argv, name_list, &start_assoc);
 
 	if(exit_code) {
 		list_destroy(name_list);
@@ -247,52 +278,43 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 	itr = list_iterator_create(name_list);
 	while((name = list_next(itr))) {
 		cluster = xmalloc(sizeof(acct_cluster_rec_t));
-		cluster->name = xstrdup(name);
+		
 		list_append(cluster_list, cluster);
-
+		cluster->name = xstrdup(name);
+		cluster->root_assoc = xmalloc(sizeof(acct_association_rec_t));
+		init_acct_association_rec(cluster->root_assoc);
 		printf("  Name          = %s\n", cluster->name);
 
-		cluster->default_fairshare = fairshare;		
-		cluster->default_max_cpu_secs_per_job = max_cpu_secs_per_job;
-		cluster->default_max_jobs = max_jobs;
-		cluster->default_max_nodes_per_job = max_nodes_per_job;
-		cluster->default_max_wall_duration_per_job = 
-			max_wall_duration_per_job;
+		cluster->root_assoc->fairshare = start_assoc.fairshare;		
+		
+		cluster->root_assoc->grp_cpu_mins = start_assoc.grp_cpu_mins;
+		cluster->root_assoc->grp_cpus = start_assoc.grp_cpus;
+		cluster->root_assoc->grp_jobs = start_assoc.grp_jobs;
+		cluster->root_assoc->grp_nodes = start_assoc.grp_nodes;
+		cluster->root_assoc->grp_submit_jobs =
+			start_assoc.grp_submit_jobs;
+		cluster->root_assoc->grp_wall = start_assoc.grp_wall;
+
+		cluster->root_assoc->max_cpu_mins_pj = 
+			start_assoc.max_cpu_mins_pj;
+		cluster->root_assoc->max_cpus_pj = start_assoc.max_cpus_pj;
+		cluster->root_assoc->max_jobs = start_assoc.max_jobs;
+		cluster->root_assoc->max_nodes_pj = start_assoc.max_nodes_pj;
+		cluster->root_assoc->max_submit_jobs =
+			start_assoc.max_submit_jobs;
+		cluster->root_assoc->max_wall_pj = start_assoc.max_wall_pj;
+
+		cluster->root_assoc->qos_list = 
+			copy_char_list(start_assoc.qos_list);
 	}
 	list_iterator_destroy(itr);
 	list_destroy(name_list);
 
 	if(limit_set) {
-		printf(" User Defaults\n");
-		if(fairshare == INFINITE)
-			printf("  Fairshare       = NONE\n");
-		else if(fairshare != NO_VAL) 
-			printf("  Fairshare       = %u\n", fairshare);
-		
-		if(max_cpu_secs_per_job == INFINITE)
-			printf("  MaxCPUSecs      = NONE\n");
-		else if(max_cpu_secs_per_job != NO_VAL) 
-			printf("  MaxCPUSecs      = %u\n",
-			       max_cpu_secs_per_job);
-		
-		if(max_jobs == INFINITE) 
-			printf("  MaxJobs         = NONE\n");
-		else if(max_jobs != NO_VAL) 
-			printf("  MaxJobs         = %u\n", max_jobs);
-		
-		if(max_nodes_per_job == INFINITE)
-			printf("  MaxNodes        = NONE\n");
-		else if(max_nodes_per_job != NO_VAL)
-			printf("  MaxNodes        = %u\n", max_nodes_per_job);
-		
-		if(max_wall_duration_per_job == INFINITE) 
-			printf("  MaxWall         = NONE\n");		
-		else if(max_wall_duration_per_job != NO_VAL) {
-			char time_buf[32];
-			mins2time_str((time_t) max_wall_duration_per_job, 
-				      time_buf, sizeof(time_buf));
-			printf("  MaxWall         = %s\n", time_buf);
-		}
+		printf(" Default Limits\n");
+		sacctmgr_print_assoc_limits(&start_assoc);
+		if(start_assoc.qos_list)
+			list_destroy(start_assoc.qos_list);
 	}
 
 	if(!list_count(cluster_list)) {
@@ -331,6 +353,7 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 	ListIterator itr2 = NULL;
 	acct_cluster_rec_t *cluster = NULL;
 	char *object;
+	List qos_list = NULL;
 
 	int field_count = 0;
 
@@ -344,10 +367,21 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 		PRINT_CHOST,
 		PRINT_CPORT,
 		PRINT_FAIRSHARE,
+		PRINT_GRPCM,
+		PRINT_GRPC,
+		PRINT_GRPJ,
+		PRINT_GRPN,
+		PRINT_GRPS,
+		PRINT_GRPW,
 		PRINT_MAXC,
+		PRINT_MAXCM,
 		PRINT_MAXJ,
 		PRINT_MAXN,
-		PRINT_MAXW
+		PRINT_MAXS,
+		PRINT_MAXW,
+		PRINT_QOS,
+		PRINT_QOS_RAW,
+		PRINT_RPC_VERSION		
 	};
 
 
@@ -363,12 +397,13 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 
 	if(!list_count(format_list)) {
 		slurm_addto_char_list(format_list, 
-				      "Cl,Controlh,Controlp,F,MaxC,"
-				      "MaxJ,MaxN,MaxW");
+				      "Cl,Controlh,Controlp,RPC,F,"
+				      "GrpJ,GrpN,GrpS,MaxJ,MaxN,MaxS,MaxW,QOS");
 	}
 
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
 		if(!strncasecmp("Cluster", object, 2)
 		   || !strncasecmp("Name", object, 2)) {
@@ -391,10 +426,45 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 			field->name = xstrdup("FairShare");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("MaxCPUSecs", object, 4)) {
-			field->type = PRINT_MAXC;
-			field->name = xstrdup("MaxCPUSecs");
+		} else if(!strncasecmp("GrpCPUMins", object, 8)) {
+			field->type = PRINT_GRPCM;
+			field->name = xstrdup("GrpCPUMins");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("GrpCPUs", object, 8)) {
+			field->type = PRINT_GRPC;
+			field->name = xstrdup("GrpCPUs");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpJobs", object, 4)) {
+			field->type = PRINT_GRPJ;
+			field->name = xstrdup("GrpJobs");
+			field->len = 7;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpNodes", object, 4)) {
+			field->type = PRINT_GRPN;
+			field->name = xstrdup("GrpNodes");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpSubmitJobs", object, 4)) {
+			field->type = PRINT_GRPS;
+			field->name = xstrdup("GrpSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpWall", object, 4)) {
+			field->type = PRINT_GRPW;
+			field->name = xstrdup("GrpWall");
 			field->len = 11;
+			field->print_routine = print_fields_time;
+		} else if(!strncasecmp("MaxCPUMins", object, 7)) {
+			field->type = PRINT_MAXCM;
+			field->name = xstrdup("MaxCPUMins");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("MaxCPUs", object, 7)) {
+			field->type = PRINT_MAXC;
+			field->name = xstrdup("MaxCPUs");
+			field->len = 8;
 			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
@@ -406,17 +476,42 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxSubmitJobs", object, 4)) {
+			field->type = PRINT_MAXS;
+			field->name = xstrdup("MaxSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
 			field->print_routine = print_fields_time;
+		} else if(!strncasecmp("QOSRAW", object, 4)) {
+			field->type = PRINT_QOS_RAW;
+			field->name = xstrdup("QOS_RAW");
+			field->len = 10;
+			field->print_routine = print_fields_char_list;
+		} else if(!strncasecmp("QOS", object, 1)) {
+			field->type = PRINT_QOS;
+			field->name = xstrdup("QOS");
+			field->len = 20;
+			field->print_routine = sacctmgr_print_qos_list;
+		} else if(!strncasecmp("RPC", object, 1)) {
+			field->type = PRINT_RPC_VERSION;
+			field->name = xstrdup("RPC");
+			field->len = 3;
+			field->print_routine = print_fields_uint;
 		} else {
 			exit_code=1;
 			fprintf(stderr, "Unknown field '%s'\n", object);
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
@@ -447,6 +542,7 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 
 	while((cluster = list_next(itr))) {
 		int curr_inx = 1;
+		acct_association_rec_t *assoc = cluster->root_assoc;
 		while((field = list_next(itr2))) {
 			switch(field->type) {
 			case PRINT_CLUSTER:
@@ -467,35 +563,98 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 			case PRINT_FAIRSHARE:
 				field->print_routine(
 					field,
-					cluster->default_fairshare,
+					cluster->root_assoc->fairshare,
 					(curr_inx == field_count));
 				break;
-			case PRINT_MAXC:
+			case PRINT_GRPCM:
 				field->print_routine(
 					field,
-					cluster->default_max_cpu_secs_per_job,
+					assoc->grp_cpu_mins,
 					(curr_inx == field_count));
 				break;
-			case PRINT_MAXJ:
+			case PRINT_GRPC:
+				field->print_routine(field,
+						     assoc->grp_cpus,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPJ:
+				field->print_routine(field, 
+						     assoc->grp_jobs,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPN:
+				field->print_routine(field,
+						     assoc->grp_nodes,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPS:
+				field->print_routine(field, 
+						     assoc->grp_submit_jobs,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPW:
 				field->print_routine(
-					field, 
-					cluster->default_max_jobs,
+					field,
+					assoc->grp_wall,
 					(curr_inx == field_count));
 				break;
-			case PRINT_MAXN:
+			case PRINT_MAXCM:
 				field->print_routine(
 					field,
-					cluster->default_max_nodes_per_job,
+					assoc->max_cpu_mins_pj,
 					(curr_inx == field_count));
 				break;
+			case PRINT_MAXC:
+				field->print_routine(field,
+						     assoc->max_cpus_pj,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_MAXJ:
+				field->print_routine(field, 
+						     assoc->max_jobs,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_MAXN:
+				field->print_routine(field,
+						     assoc->max_nodes_pj,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_MAXS:
+				field->print_routine(field, 
+						     assoc->max_submit_jobs,
+						     (curr_inx == field_count));
+				break;
 			case PRINT_MAXW:
 				field->print_routine(
 					field,
-					cluster->
-					default_max_wall_duration_per_job,
+					assoc->max_wall_pj,
+					(curr_inx == field_count));
+				break;
+			case PRINT_QOS:
+				if(!qos_list) 
+					qos_list = acct_storage_g_get_qos(
+						db_conn, my_uid, NULL);
+				
+				field->print_routine(field,
+						     qos_list,
+						     assoc->qos_list,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_QOS_RAW:
+				field->print_routine(field,
+						     assoc->qos_list,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_RPC_VERSION:
+				field->print_routine(
+					field,
+					cluster->rpc_version,
 					(curr_inx == field_count));
 				break;
 			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
 				break;
 			}
 			curr_inx++;
@@ -504,6 +663,9 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 		printf("\n");
 	}
 
+	if(qos_list)
+		list_destroy(qos_list);
+
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
 	list_destroy(cluster_list);
@@ -522,20 +684,11 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 	int cond_set = 0, rec_set = 0, set = 0;
 	List ret_list = NULL;
 
-	assoc_cond = xmalloc(sizeof(acct_association_cond_t));
+
+	init_acct_association_rec(assoc);
+
 	assoc_cond->cluster_list = list_create(slurm_destroy_char);
 	assoc_cond->acct_list = list_create(NULL);
-	assoc_cond->fairshare = NO_VAL;
-	assoc_cond->max_cpu_secs_per_job = NO_VAL;
-	assoc_cond->max_jobs = NO_VAL;
-	assoc_cond->max_nodes_per_job = NO_VAL;
-	assoc_cond->max_wall_duration_per_job = NO_VAL;
-	
-	assoc->fairshare = NO_VAL;
-	assoc->max_cpu_secs_per_job = NO_VAL;
-	assoc->max_jobs = NO_VAL;
-	assoc->max_nodes_per_job = NO_VAL;
-	assoc->max_wall_duration_per_job = NO_VAL;
 
 	for (i=0; i<argc; i++) {
 		if (!strncasecmp (argv[i], "Where", 5)) {
@@ -545,7 +698,7 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 				cond_set = 1;
 		} else if (!strncasecmp (argv[i], "Set", 3)) {
 			i++;
-			if(_set_rec(&i, argc, argv, assoc))
+			if(_set_rec(&i, argc, argv, NULL, assoc))
 				rec_set = 1;
 		} else {
 			if(_set_cond(&i, argc, argv,
@@ -575,39 +728,9 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 	}
 
 	printf(" Setting\n");
-	if(rec_set) 
-		printf(" User Defaults  =\n");
-
-	if(assoc->fairshare == INFINITE)
-		printf("  Fairshare     = NONE\n");
-	else if(assoc->fairshare != NO_VAL) 
-		printf("  Fairshare     = %u\n", assoc->fairshare);
-		
-	if(assoc->max_cpu_secs_per_job == INFINITE)
-		printf("  MaxCPUSecs    = NONE\n");
-	else if(assoc->max_cpu_secs_per_job != NO_VAL) 
-		printf("  MaxCPUSecs    = %u\n",
-		       assoc->max_cpu_secs_per_job);
-		
-	if(assoc->max_jobs == INFINITE) 
-		printf("  MaxJobs       = NONE\n");
-	else if(assoc->max_jobs != NO_VAL) 
-		printf("  MaxJobs       = %u\n", assoc->max_jobs);
-		
-	if(assoc->max_nodes_per_job == INFINITE)
-		printf("  MaxNodes      = NONE\n");
-	else if(assoc->max_nodes_per_job != NO_VAL)
-		printf("  MaxNodes      = %u\n",
-		       assoc->max_nodes_per_job);
-		
-	if(assoc->max_wall_duration_per_job == INFINITE) 
-		printf("  MaxWall       = NONE\n");		
-	else if(assoc->max_wall_duration_per_job != NO_VAL) {
-		char time_buf[32];
-		mins2time_str((time_t) 
-			      assoc->max_wall_duration_per_job, 
-			      time_buf, sizeof(time_buf));
-		printf("  MaxWall       = %s\n", time_buf);
+	if(rec_set) {
+		printf(" Default Limits =\n");
+		sacctmgr_print_assoc_limits(assoc);
 	}
 
 	list_append(assoc_cond->acct_list, "root");
@@ -711,6 +834,8 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 {
 	acct_user_cond_t user_cond;
 	acct_user_rec_t *user = NULL;
+	sacctmgr_assoc_t *sacctmgr_assoc = NULL;
+	acct_association_rec_t *assoc = NULL;
 	acct_association_cond_t assoc_cond;
 	List assoc_list = NULL;
 	List acct_list = NULL;
@@ -719,6 +844,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	char *cluster_name = NULL;
 	char *file_name = NULL;
 	char *user_name = NULL;
+	char *line = NULL;
 	int i;
 	FILE *fd = NULL;
 
@@ -738,7 +864,8 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 		return SLURM_ERROR;
 		
 	} else {
-		if(user->admin_level < ACCT_ADMIN_SUPER_USER) {
+		if(my_uid != slurm_get_slurm_user_id() && my_uid != 0
+		    && user->admin_level < ACCT_ADMIN_SUPER_USER) {
 			exit_code=1;
 			fprintf(stderr, " Your user does not have sufficient "
 				"privileges to dump clusters.\n");
@@ -752,7 +879,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 
 	for (i=0; i<argc; i++) {
 		int end = parse_option_end(argv[i]);
-		if(!end) {
+		if(!end || !strncasecmp (argv[i], "Cluster", 1)) {
 			if(cluster_name) {
 				exit_code=1;
 				fprintf(stderr, 
@@ -770,15 +897,6 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 				continue;
 			}		
 			file_name = xstrdup(argv[i]+end);
-		} else if (!strncasecmp (argv[i], "Name", 1)) {
-			if(cluster_name) {
-				exit_code=1;
-				fprintf(stderr, 
-					" Can only do one cluster at a time.  "
-					"Already doing %s\n", cluster_name);
-				continue;
-			}
-			cluster_name = xstrdup(argv[i]+end);
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown option: %s\n", argv[i]);
@@ -798,6 +916,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 
 	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
 	assoc_cond.without_parent_limits = 1;
+	assoc_cond.with_raw_qos = 1;
 	assoc_cond.cluster_list = list_create(NULL);
 	list_append(assoc_cond.cluster_list, cluster_name);
 
@@ -812,7 +931,8 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 		return SLURM_ERROR;
 	} else if(!list_count(assoc_list)) {
 		exit_code=1;
-		fprintf(stderr, " Cluster %s returned nothing.", cluster_name);
+		fprintf(stderr, " Cluster %s returned nothing.\n",
+			cluster_name);
 		list_destroy(assoc_list);
 		xfree(cluster_name);
 		return SLURM_ERROR;
@@ -828,8 +948,10 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	if(fprintf(fd,
 		   "# To edit this file start with a cluster line "
 		   "for the new cluster\n"
-		   "# Cluster - cluster_name\n"
-		   "# Followed by Accounts you want in this fashion...\n"
+		   "# Cluster - cluster_name:MaxNodesPerJob=50\n"
+		   "# Followed by Accounts you want in this fashion "
+		   "(root is created by default)...\n"
+		   "# Parent - root\n"
 		   "# Account - cs:MaxNodesPerJob=5:MaxJobs=4:"
 		   "MaxProcSecondsPerJob=20:FairShare=399:"
 		   "MaxWallDurationPerJob=40:Description='Computer Science':"
@@ -853,11 +975,22 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 		return SLURM_ERROR;
 	}
 
-	if(fprintf(fd, "Cluster - %s\n", cluster_name) < 0) {
+	line = xstrdup_printf("Cluster - %s", cluster_name);
+
+	sacctmgr_assoc = list_peek(sacctmgr_assoc_list);
+	assoc = sacctmgr_assoc->assoc;
+	if(strcmp(assoc->acct, "root")) 
+		fprintf(stderr, " Root association was not at the top; it was %s\n",
+			assoc->acct);
+	else 
+		print_file_add_limits_to_line(&line, assoc);
+	
+	if(fprintf(fd, "%s\n", line) < 0) {
 		exit_code=1;
-		fprintf(stderr, "Can't write to file");
+		fprintf(stderr, " Can't write to file\n");
 		return SLURM_ERROR;
 	}
+	info("%s", line);
 
 	print_file_sacctmgr_assoc_list(
 		fd, sacctmgr_assoc_list, user_list, acct_list);
diff --git a/src/sacctmgr/common.c b/src/sacctmgr/common.c
index 53a104083..7d9e693bb 100644
--- a/src/sacctmgr/common.c
+++ b/src/sacctmgr/common.c
@@ -37,6 +37,8 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
+#include "src/common/slurmdbd_defs.h"
+
 #include <unistd.h>
 #include <termios.h>
 
@@ -75,6 +77,27 @@ static void _nonblock(int state)
 
 }
 
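+/* Return a comma-separated string of the QOS names in qos_list; caller must xfree() it. */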
+static char *_get_qos_list_str(List qos_list)
+{
+	char *qos_char = NULL;
+	ListIterator itr = NULL;
+	acct_qos_rec_t *qos = NULL;
+
+	if(!qos_list)
+		return NULL;
+
+	itr = list_iterator_create(qos_list);
+	while((qos = list_next(itr))) {
+		if(qos_char) 
+			xstrfmtcat(qos_char, ",%s", qos->name);
+		else
+			xstrcat(qos_char, qos->name);
+	}
+	list_iterator_destroy(itr);
+
+	return qos_char;
+}
+
 extern void destroy_sacctmgr_assoc(void *object)
 {
 	/* Most of this is pointers to something else that will be
@@ -459,13 +482,19 @@ extern acct_qos_rec_t *sacctmgr_find_qos_from_list(
 {
 	ListIterator itr = NULL;
 	acct_qos_rec_t *qos = NULL;
+	char *working_name = NULL;
 	
 	if(!name || !qos_list)
 		return NULL;
+
+	if(name[0] == '+' || name[0] == '-')
+		working_name = name+1;
+	else
+		working_name = name;
 	
 	itr = list_iterator_create(qos_list);
 	while((qos = list_next(itr))) {
-		if(!strcasecmp(name, qos->name))
+		if(!strcasecmp(working_name, qos->name))
 			break;
 	}
 	list_iterator_destroy(itr);
@@ -556,6 +585,29 @@ extern int get_uint(char *in_value, uint32_t *out_value, char *type)
 	return SLURM_SUCCESS;
 }
 
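+/* Parse in_value into *out_value; a negative number is stored as INFINITE to flag a clear. */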
+extern int get_uint64(char *in_value, uint64_t *out_value, char *type)
+{
+	char *ptr = NULL, *meat = NULL;
+	long long num;
+	
+	if(!(meat = strip_quotes(in_value, NULL)))
+		return SLURM_ERROR;
+
+	num = strtoll(meat, &ptr, 10);
+	if ((num == 0) && ptr && ptr[0]) {
+		error("Invalid value for %s (%s)", type, meat);
+		xfree(meat);
+		return SLURM_ERROR;
+	}
+	xfree(meat);
+	
+	if (num < 0)
+		*out_value = INFINITE;		/* flag to clear */
+	else
+		*out_value = (uint64_t) num;
+	return SLURM_SUCCESS;
+}
+
 extern int addto_qos_char_list(List char_list, List qos_list, char *names, 
 			       int option)
 {
@@ -566,6 +618,8 @@ extern int addto_qos_char_list(List char_list, List qos_list, char *names,
 	int quote = 0;
 	uint32_t id=0;
 	int count = 0;
+	int equal_set = 0;
+	int add_set = 0;
 
 	if(!char_list) {
 		error("No list was given to fill in");
@@ -574,6 +628,7 @@ extern int addto_qos_char_list(List char_list, List qos_list, char *names,
 
 	if(!qos_list || !list_count(qos_list)) {
 		debug2("No real qos_list");
+		exit_code = 1;
 		return 0;
 	}
 
@@ -592,19 +647,60 @@ extern int addto_qos_char_list(List char_list, List qos_list, char *names,
 				names[i] = '`';
 			else if(names[i] == ',') {
 				if((i-start) > 0) {
+					int tmp_option = option;
+					if(names[start] == '+' 
+					   || names[start] == '-') {
+						tmp_option = names[start];
+						start++;
+					}
 					name = xmalloc((i-start+1));
 					memcpy(name, names+start, (i-start));
 					
 					id = str_2_acct_qos(qos_list, name);
+					if(id == NO_VAL) {
+						char *tmp = _get_qos_list_str(
+							qos_list);
+						error("You gave a bad qos "
+						      "'%s'.  Valid QOS's are "
+						      "%s",
+						      name, tmp);
+						xfree(tmp);
+						exit_code = 1;
+						xfree(name);
+						break;
+					}
 					xfree(name);
-					if(id == NO_VAL) 
-						goto bad;
-
-					if(option) {
+					
+					if(tmp_option) {
+						if(equal_set) {
+							error("You can't set "
+							      "qos equal to "
+							      "something and "
+							      "then add or "
+							      "subtract from "
+							      "it in the same "
+							      "line");
+							exit_code = 1;
+							break;
+						}
+						add_set = 1;
 						name = xstrdup_printf(
-							"%c%u", option, id);
-					} else
+							"%c%u", tmp_option, id);
+					} else {
+						if(add_set) {
+							error("You can't set "
+							      "qos equal to "
+							      "something and "
+							      "then add or "
+							      "subtract from "
+							      "it in the same "
+							      "line");
+							exit_code = 1;
+							break;
+						}
+						equal_set = 1;
 						name = xstrdup_printf("%u", id);
+					}
 					while((tmp_char = list_next(itr))) {
 						if(!strcasecmp(tmp_char, name))
 							break;
@@ -616,33 +712,73 @@ extern int addto_qos_char_list(List char_list, List qos_list, char *names,
 						count++;
 					} else 
 						xfree(name);
+				} else if (!(i-start)) {
+					list_append(char_list, xstrdup(""));
+					count++;
 				}
-			bad:
+
 				i++;
 				start = i;
 				if(!names[i]) {
-					info("There is a problem with "
-					     "your request.  It appears you "
-					     "have spaces inside your list.");
+					error("There is a problem with "
+					      "your request.  It appears you "
+					      "have spaces inside your list.");
+					exit_code = 1;
 					break;
 				}
 			}
 			i++;
 		}
 		if((i-start) > 0) {
+			int tmp_option = option;
+			if(names[start] == '+' || names[start] == '-') {
+				tmp_option = names[start];
+				start++;
+			}
 			name = xmalloc((i-start)+1);
 			memcpy(name, names+start, (i-start));
 			
 			id = str_2_acct_qos(qos_list, name);
-			xfree(name);
-			if(id == NO_VAL) 
+			if(id == NO_VAL) {
+				char *tmp = _get_qos_list_str(qos_list);
+				error("You gave a bad qos "
+				      "'%s'.  Valid QOS's are "
+				      "%s",
+				      name, tmp);
+				xfree(tmp);
+				xfree(name);
 				goto end_it;
-			
-			if(option) {
+			}
+			xfree(name);
+
+			if(tmp_option) {
+				if(equal_set) {
+					error("You can't set "
+					      "qos equal to "
+					      "something and "
+					      "then add or "
+					      "subtract from "
+					      "it in the same "
+					      "line");
+					exit_code = 1;
+					goto end_it;
+				}
 				name = xstrdup_printf(
-					"%c%u", option, id);
-			} else
+					"%c%u", tmp_option, id);
+			} else {
+				if(add_set) {
+					error("You can't set "
+					      "qos equal to "
+					      "something and "
+					      "then add or "
+					      "subtract from "
+					      "it in the same "
+					      "line");
+					exit_code = 1;
+					goto end_it;
+				}
 				name = xstrdup_printf("%u", id);
+			}
 			while((tmp_char = list_next(itr))) {
 				if(!strcasecmp(tmp_char, name))
 					break;
@@ -653,12 +789,138 @@ extern int addto_qos_char_list(List char_list, List qos_list, char *names,
 				count++;
 			} else 
 				xfree(name);
+		} else if (!(i-start)) {
+			list_append(char_list, xstrdup(""));
+			count++;
 		}
 	}	
+	if(!count) {
+		error("You gave me an empty qos list");
+		exit_code = 1;
+	}
+
 end_it:
 	list_iterator_destroy(itr);
 	return count;
-} 
+}
+ 
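+/* Convert comma-separated slurmdbd action names to their numeric ids and append new ones to char_list. */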
+extern int addto_action_char_list(List char_list, char *names)
+{
+	int i=0, start=0;
+	char *name = NULL, *tmp_char = NULL;
+	ListIterator itr = NULL;
+	char quote_c = '\0';
+	int quote = 0;
+	uint32_t id=0;
+	int count = 0;
+
+	if(!char_list) {
+		error("No list was given to fill in");
+		return 0;
+	}
+
+	itr = list_iterator_create(char_list);
+	if(names) {
+		if (names[i] == '\"' || names[i] == '\'') {
+			quote_c = names[i];
+			quote = 1;
+			i++;
+		}
+		start = i;
+		while(names[i]) {
+			if(quote && names[i] == quote_c)
+				break;
+			else if (names[i] == '\"' || names[i] == '\'')
+				names[i] = '`';
+			else if(names[i] == ',') {
+				if((i-start) > 0) {
+					name = xmalloc((i-start+1));
+					memcpy(name, names+start, (i-start));
+					
+					id = str_2_slurmdbd_msg_type(name);
+					if(id == NO_VAL) {
+						error("You gave a bad action "
+						      "'%s'.", name);
+						xfree(name);
+						break;
+					}
+					xfree(name);
+
+					name = xstrdup_printf("%u", id);
+					while((tmp_char = list_next(itr))) {
+						if(!strcasecmp(tmp_char, name))
+							break;
+					}
+					list_iterator_reset(itr);
+
+					if(!tmp_char) {
+						list_append(char_list, name);
+						count++;
+					} else 
+						xfree(name);
+				}
+
+				i++;
+				start = i;
+				if(!names[i]) {
+					error("There is a problem with "
+					      "your request.  It appears you "
+					      "have spaces inside your list.");
+					break;
+				}
+			}
+			i++;
+		}
+		if((i-start) > 0) {
+			name = xmalloc((i-start)+1);
+			memcpy(name, names+start, (i-start));
+			
+			id = str_2_slurmdbd_msg_type(name);
+			if(id == NO_VAL)  {
+				error("You gave a bad action '%s'.",
+				      name);
+				xfree(name);
+				goto end_it;
+			}
+			xfree(name);
+			
+			name = xstrdup_printf("%u", id);
+			while((tmp_char = list_next(itr))) {
+				if(!strcasecmp(tmp_char, name))
+					break;
+			}
+			
+			if(!tmp_char) {
+				list_append(char_list, name);
+				count++;
+			} else 
+				xfree(name);
+		}
+	}	
+end_it:
+	list_iterator_destroy(itr);
+	return count;
+}
+ 
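+/* Return a new List containing xstrdup'd copies of char_list's strings, or NULL if empty. */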
+extern List copy_char_list(List char_list) 
+{
+	List ret_list = NULL;
+	char *tmp_char = NULL;
+	ListIterator itr = NULL;
+
+	if(!char_list || !list_count(char_list))
+		return NULL;
+
+	itr = list_iterator_create(char_list);
+	ret_list = list_create(slurm_destroy_char);
+	
+	while((tmp_char = list_next(itr))) 
+		list_append(ret_list, xstrdup(tmp_char));
+	
+	list_iterator_destroy(itr);
+	
+	return ret_list;
+}
 
 extern void sacctmgr_print_coord_list(
 	print_field_t *field, List value, int last)
@@ -720,41 +982,209 @@ extern void sacctmgr_print_qos_list(print_field_t *field, List qos_list,
 	xfree(print_this);
 }
 
-extern char *get_qos_complete_str(List qos_list, List num_qos_list)
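+/* Print an association's fairshare, group and per-job limits, and QOS list to stdout. */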
+extern void sacctmgr_print_assoc_limits(acct_association_rec_t *assoc)
 {
-	List temp_list = NULL;
-	char *temp_char = NULL;
-	char *print_this = NULL;
-	ListIterator itr = NULL;
+	if(!assoc)
+		return;
+
+	if(assoc->fairshare == INFINITE)
+		printf("  Fairshare     = NONE\n");
+	else if(assoc->fairshare != NO_VAL) 
+		printf("  Fairshare     = %u\n", assoc->fairshare);
+
+	if(assoc->grp_cpu_mins == INFINITE)
+		printf("  GrpCPUMins    = NONE\n");
+	else if(assoc->grp_cpu_mins != NO_VAL) 
+		printf("  GrpCPUMins    = %llu\n", 
+		       (long long unsigned)assoc->grp_cpu_mins);
+		
+	if(assoc->grp_cpus == INFINITE)
+		printf("  GrpCPUs       = NONE\n");
+	else if(assoc->grp_cpus != NO_VAL) 
+		printf("  GrpCPUs       = %u\n", assoc->grp_cpus);
+				
+	if(assoc->grp_jobs == INFINITE) 
+		printf("  GrpJobs       = NONE\n");
+	else if(assoc->grp_jobs != NO_VAL) 
+		printf("  GrpJobs       = %u\n", assoc->grp_jobs);
+		
+	if(assoc->grp_nodes == INFINITE)
+		printf("  GrpNodes      = NONE\n");
+	else if(assoc->grp_nodes != NO_VAL)
+		printf("  GrpNodes      = %u\n", assoc->grp_nodes);
+		
+	if(assoc->grp_submit_jobs == INFINITE) 
+		printf("  GrpSubmitJobs = NONE\n");
+	else if(assoc->grp_submit_jobs != NO_VAL) 
+		printf("  GrpSubmitJobs = %u\n", 
+		       assoc->grp_submit_jobs);
+		
+	if(assoc->grp_wall == INFINITE) 
+		printf("  GrpWall       = NONE\n");		
+	else if(assoc->grp_wall != NO_VAL) {
+		char time_buf[32];
+		mins2time_str((time_t) assoc->grp_wall, 
+			      time_buf, sizeof(time_buf));
+		printf("  GrpWall       = %s\n", time_buf);
+	}
+
+	if(assoc->max_cpu_mins_pj == INFINITE)
+		printf("  MaxCPUMins    = NONE\n");
+	else if(assoc->max_cpu_mins_pj != NO_VAL) 
+		printf("  MaxCPUMins    = %llu\n", 
+		       (long long unsigned)assoc->max_cpu_mins_pj);
+		
+	if(assoc->max_cpus_pj == INFINITE)
+		printf("  MaxCPUs       = NONE\n");
+	else if(assoc->max_cpus_pj != NO_VAL) 
+		printf("  MaxCPUs       = %u\n", assoc->max_cpus_pj);
+				
+	if(assoc->max_jobs == INFINITE) 
+		printf("  MaxJobs       = NONE\n");
+	else if(assoc->max_jobs != NO_VAL) 
+		printf("  MaxJobs       = %u\n", assoc->max_jobs);
+		
+	if(assoc->max_nodes_pj == INFINITE)
+		printf("  MaxNodes      = NONE\n");
+	else if(assoc->max_nodes_pj != NO_VAL)
+		printf("  MaxNodes      = %u\n", assoc->max_nodes_pj);
+		
+	if(assoc->max_submit_jobs == INFINITE) 
+		printf("  MaxSubmitJobs = NONE\n");
+	else if(assoc->max_submit_jobs != NO_VAL) 
+		printf("  MaxSubmitJobs = %u\n", 
+		       assoc->max_submit_jobs);
+		
+	if(assoc->max_wall_pj == INFINITE) 
+		printf("  MaxWall       = NONE\n");		
+	else if(assoc->max_wall_pj != NO_VAL) {
+		char time_buf[32];
+		mins2time_str((time_t) assoc->max_wall_pj, 
+			      time_buf, sizeof(time_buf));
+		printf("  MaxWall       = %s\n", time_buf);
+	}
+
+	if(assoc->qos_list) {
+		List qos_list = acct_storage_g_get_qos(db_conn, my_uid, NULL);
+		char *temp_char = get_qos_complete_str(qos_list,
+						       assoc->qos_list);
+		if(temp_char) {		
+			printf("  QOS           = %s\n", temp_char);
+			xfree(temp_char);
+		}
+		if(qos_list)
+			list_destroy(qos_list);
+	} 
+}
+
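+/* Print a QOS record's limits, preemption relationships and priority to stdout. */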
+extern void sacctmgr_print_qos_limits(acct_qos_rec_t *qos)
+{
+	List qos_list = NULL;
+	if(!qos)
+		return;
+
+	if(qos->preemptee_list || qos->preemptor_list)
+		qos_list = acct_storage_g_get_qos(db_conn, my_uid, NULL);
 
-	if(!qos_list || !list_count(qos_list)
-	   || !num_qos_list || !list_count(num_qos_list))
-		return xstrdup("");
+	if(qos->job_flags)
+		printf("  JobFlags       = %s\n", qos->job_flags);
 
-	temp_list = list_create(NULL);
+	if(qos->grp_cpu_mins == INFINITE)
+		printf("  GrpCPUMins     = NONE\n");
+	else if(qos->grp_cpu_mins != NO_VAL) 
+		printf("  GrpCPUMins     = %llu\n", 
+		       (long long unsigned)qos->grp_cpu_mins);
+		
+	if(qos->grp_cpus == INFINITE)
+		printf("  GrpCPUs        = NONE\n");
+	else if(qos->grp_cpus != NO_VAL) 
+		printf("  GrpCPUs        = %u\n", qos->grp_cpus);
+				
+	if(qos->grp_jobs == INFINITE) 
+		printf("  GrpJobs        = NONE\n");
+	else if(qos->grp_jobs != NO_VAL) 
+		printf("  GrpJobs        = %u\n", qos->grp_jobs);
+		
+	if(qos->grp_nodes == INFINITE)
+		printf("  GrpNodes       = NONE\n");
+	else if(qos->grp_nodes != NO_VAL)
+		printf("  GrpNodes       = %u\n", qos->grp_nodes);
+		
+	if(qos->grp_submit_jobs == INFINITE) 
+		printf("  GrpSubmitJobs  = NONE\n");
+	else if(qos->grp_submit_jobs != NO_VAL) 
+		printf("  GrpSubmitJobs  = %u\n", 
+		       qos->grp_submit_jobs);
+		
+	if(qos->grp_wall == INFINITE) 
+		printf("  GrpWall        = NONE\n");		
+	else if(qos->grp_wall != NO_VAL) {
+		char time_buf[32];
+		mins2time_str((time_t) qos->grp_wall, 
+			      time_buf, sizeof(time_buf));
+		printf("  GrpWall        = %s\n", time_buf);
+	}
 
-	itr = list_iterator_create(num_qos_list);
-	while((temp_char = list_next(itr))) {
-		temp_char = acct_qos_str(qos_list, atoi(temp_char));
-		if(temp_char)
-			list_append(temp_list, temp_char);
+	if(qos->max_cpu_mins_pu == INFINITE)
+		printf("  MaxCPUMins     = NONE\n");
+	else if(qos->max_cpu_mins_pu != NO_VAL) 
+		printf("  MaxCPUMins     = %llu\n", 
+		       (long long unsigned)qos->max_cpu_mins_pu);
+		
+	if(qos->max_cpus_pu == INFINITE)
+		printf("  MaxCPUs        = NONE\n");
+	else if(qos->max_cpus_pu != NO_VAL) 
+		printf("  MaxCPUs        = %u\n", qos->max_cpus_pu);
+				
+	if(qos->max_jobs_pu == INFINITE) 
+		printf("  MaxJobs        = NONE\n");
+	else if(qos->max_jobs_pu != NO_VAL) 
+		printf("  MaxJobs        = %u\n", qos->max_jobs_pu);
+		
+	if(qos->max_nodes_pu == INFINITE)
+		printf("  MaxNodes       = NONE\n");
+	else if(qos->max_nodes_pu != NO_VAL)
+		printf("  MaxNodes       = %u\n", qos->max_nodes_pu);
+		
+	if(qos->max_submit_jobs_pu == INFINITE) 
+		printf("  MaxSubmitJobs  = NONE\n");
+	else if(qos->max_submit_jobs_pu != NO_VAL) 
+		printf("  MaxSubmitJobs  = %u\n", 
+		       qos->max_submit_jobs_pu);
+		
+	if(qos->max_wall_pu == INFINITE) 
+		printf("  MaxWall        = NONE\n");		
+	else if(qos->max_wall_pu != NO_VAL) {
+		char time_buf[32];
+		mins2time_str((time_t) qos->max_wall_pu, 
+			      time_buf, sizeof(time_buf));
+		printf("  MaxWall        = %s\n", time_buf);
 	}
-	list_iterator_destroy(itr);
-	list_sort(temp_list, (ListCmpF)sort_char_list);
-	itr = list_iterator_create(temp_list);
-	while((temp_char = list_next(itr))) {
-		if(print_this) 
-			xstrfmtcat(print_this, ",%s", temp_char);
-		else 
-			print_this = xstrdup(temp_char);
+
+	if(qos->preemptee_list) {
+		char *temp_char = get_qos_complete_str(qos_list,
+						       qos->preemptee_list);
+		if(temp_char) {		
+			printf("  Preemptable by = %s\n", temp_char);
+			xfree(temp_char);
+		}
 	}
-	list_iterator_destroy(itr);
-	list_destroy(temp_list);
+	if(qos->preemptor_list) {
+		char *temp_char = get_qos_complete_str(qos_list,
+						       qos->preemptor_list);
+		if(temp_char) {		
+			printf("  Can Preempt    = %s\n", temp_char);
+			xfree(temp_char);
+		}
+	} 
 
-	if(!print_this)
-		return xstrdup("");
+	if(qos->priority == INFINITE)
+		printf("  Priority       = NONE\n");
+	else if(qos->priority != NO_VAL) 
+		printf("  Priority       = %u\n", qos->priority);
 
-	return print_this;
+	if(qos_list)
+		list_destroy(qos_list);
 }
 
 extern int sort_coord_list(acct_coord_rec_t *coord_a, acct_coord_rec_t *coord_b)
@@ -768,16 +1198,3 @@ extern int sort_coord_list(acct_coord_rec_t *coord_a, acct_coord_rec_t *coord_b)
 	
 	return 0;
 }
-
-extern int sort_char_list(char *name_a, char *name_b)
-{
-	int diff = strcmp(name_a, name_b);
-
-	if (diff < 0)
-		return -1;
-	else if (diff > 0)
-		return 1;
-	
-	return 0;
-}
-
diff --git a/src/sacctmgr/file_functions.c b/src/sacctmgr/file_functions.c
index 412e79a43..173a4daa4 100644
--- a/src/sacctmgr/file_functions.c
+++ b/src/sacctmgr/file_functions.c
@@ -46,10 +46,21 @@ typedef struct {
 	char *def_acct;
 	char *desc;
 	uint32_t fairshare;
-	uint32_t max_cpu_secs_per_job; 
+
+	uint64_t grp_cpu_mins;
+	uint32_t grp_cpus;
+	uint32_t grp_jobs;
+	uint32_t grp_nodes; 
+	uint32_t grp_submit_jobs;
+	uint32_t grp_wall;
+
+	uint64_t max_cpu_mins_pj; 
+	uint32_t max_cpus_pj; 
 	uint32_t max_jobs;
-	uint32_t max_nodes_per_job; 
-	uint32_t max_wall_duration_per_job;
+	uint32_t max_nodes_pj; 
+	uint32_t max_submit_jobs;
+	uint32_t max_wall_pj;
+
 	char *name;
 	char *org;
 	char *part;
@@ -64,10 +75,18 @@ enum {
 	PRINT_DACCT,
 	PRINT_DESC,
 	PRINT_FAIRSHARE,
+	PRINT_GRPCM,
+	PRINT_GRPC,
+	PRINT_GRPJ,
+	PRINT_GRPN,
+	PRINT_GRPS,
+	PRINT_GRPW,
 	PRINT_ID,
 	PRINT_MAXC,
+	PRINT_MAXCM,
 	PRINT_MAXJ,
 	PRINT_MAXN,
+	PRINT_MAXS,
 	PRINT_MAXW,
 	PRINT_NAME,
 	PRINT_ORG,
@@ -87,6 +106,34 @@ typedef enum {
 
 static List qos_list = NULL;
 
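+/* Reset file_opts to defaults: fairshare 1, admin unset, all other limits INFINITE. */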
+static int _init_sacctmgr_file_opts(sacctmgr_file_opts_t *file_opts)
+{
+	if(!file_opts)
+		return SLURM_ERROR;
+
+	memset(file_opts, 0, sizeof(sacctmgr_file_opts_t));
+
+	file_opts->admin = ACCT_ADMIN_NOTSET;
+
+	file_opts->fairshare = 1;
+
+	file_opts->grp_cpu_mins = INFINITE;
+	file_opts->grp_cpus = INFINITE;
+	file_opts->grp_jobs = INFINITE;
+	file_opts->grp_nodes = INFINITE;
+	file_opts->grp_submit_jobs = INFINITE;
+	file_opts->grp_wall = INFINITE;
+
+	file_opts->max_cpu_mins_pj = INFINITE;
+	file_opts->max_cpus_pj = INFINITE;
+	file_opts->max_jobs = INFINITE;
+	file_opts->max_nodes_pj = INFINITE;
+	file_opts->max_submit_jobs = INFINITE;
+	file_opts->max_wall_pj = INFINITE;
+
+	return SLURM_SUCCESS;
+}
+
 static int _strip_continuation(char *buf, int len)
 {
 	char *ptr;
@@ -207,12 +254,7 @@ static sacctmgr_file_opts_t *_parse_options(char *options)
 	char *option = NULL;
 	char quote_c = '\0';
 	
-	file_opts->fairshare = 1;
-	file_opts->max_cpu_secs_per_job = INFINITE;
-	file_opts->max_jobs = INFINITE;
-	file_opts->max_nodes_per_job = INFINITE;
-	file_opts->max_wall_duration_per_job = INFINITE;
-	file_opts->admin = ACCT_ADMIN_NOTSET;
+	_init_sacctmgr_file_opts(file_opts);
 
 	while(options[i]) {
 		quote = 0;
@@ -276,19 +318,88 @@ static sacctmgr_file_opts_t *_parse_options(char *options)
 				_destroy_sacctmgr_file_opts(file_opts);
 				break;
 			}
-		} else if (!strncasecmp (sub, "MaxCPUSec", 4)
+		} else if (!strncasecmp (sub, "GrpCPUMins", 7)) {
+			if (get_uint64(option, &file_opts->grp_cpu_mins,
+				       "GrpCPUMins") != SLURM_SUCCESS) {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpCPUMins value: %s\n", option);
+				_destroy_sacctmgr_file_opts(file_opts);
+				break;
+			}
+		} else if (!strncasecmp (sub, "GrpCPUs", 7)) {
+			if (get_uint(option, &file_opts->grp_cpus,
+				     "GrpCPUs") != SLURM_SUCCESS) {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpCPUs value: %s\n", option);
+				_destroy_sacctmgr_file_opts(file_opts);
+				break;
+			}
+		} else if (!strncasecmp (sub, "GrpJobs", 4)) {
+			if (get_uint(option, &file_opts->grp_jobs,
+				     "GrpJobs") != SLURM_SUCCESS) {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpJobs value: %s\n", option);
+				_destroy_sacctmgr_file_opts(file_opts);
+				break;
+			}
+		} else if (!strncasecmp (sub, "GrpNodes", 4)) {
+			if (get_uint(option, &file_opts->grp_nodes,
+				     "GrpNodes") != SLURM_SUCCESS) {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpNodes value: %s\n", option);
+				_destroy_sacctmgr_file_opts(file_opts);
+				break;
+			}
+		} else if (!strncasecmp (sub, "GrpSubmitJobs", 4)) {
+			if (get_uint(option, &file_opts->grp_submit_jobs,
+				     "GrpSubmitJobs") != SLURM_SUCCESS) {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpSubmitJobs value: %s\n", option);
+				_destroy_sacctmgr_file_opts(file_opts);
+				break;
+			}
+		} else if (!strncasecmp (sub, "GrpWall", 4)) {
+			mins = time_str2mins(option);
+			if (mins >= 0) {
+				file_opts->grp_wall 
+					= (uint32_t) mins;
+			} else if (strcmp(option, "-1")) {
+				file_opts->grp_wall = INFINITE;
+			} else {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpWall time format: %s\n", 
+					option);
+				_destroy_sacctmgr_file_opts(file_opts);
+				break;
+			}
+		} else if (!strncasecmp (sub, "MaxCPUMins", 7)
 			   || !strncasecmp (sub, "MaxProcSec", 4)) {
-			if (get_uint(option, &file_opts->max_cpu_secs_per_job,
-			    "MaxCPUSec") != SLURM_SUCCESS) {
+			if (get_uint64(option, &file_opts->max_cpu_mins_pj,
+				       "MaxCPUMins") != SLURM_SUCCESS) {
 				exit_code=1;
 				fprintf(stderr, 
-					" Bad MaxCPUSec value: %s\n", option);
+					" Bad MaxCPUMins value: %s\n", option);
+				_destroy_sacctmgr_file_opts(file_opts);
+				break;
+			}
+		} else if (!strncasecmp (sub, "MaxCPUs", 7)) {
+			if (get_uint(option, &file_opts->max_cpus_pj,
+				     "MaxCPUs") != SLURM_SUCCESS) {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad MaxCPUs value: %s\n", option);
 				_destroy_sacctmgr_file_opts(file_opts);
 				break;
 			}
 		} else if (!strncasecmp (sub, "MaxJobs", 4)) {
 			if (get_uint(option, &file_opts->max_jobs,
-			    "MaxJobs") != SLURM_SUCCESS) {
+				     "MaxJobs") != SLURM_SUCCESS) {
 				exit_code=1;
 				fprintf(stderr, 
 					" Bad MaxJobs value: %s\n", option);
@@ -296,21 +407,30 @@ static sacctmgr_file_opts_t *_parse_options(char *options)
 				break;
 			}
 		} else if (!strncasecmp (sub, "MaxNodes", 4)) {
-			if (get_uint(option, &file_opts->max_nodes_per_job,
-			    "MaxNodes") != SLURM_SUCCESS) {
+			if (get_uint(option, &file_opts->max_nodes_pj,
+				     "MaxNodes") != SLURM_SUCCESS) {
 				exit_code=1;
 				fprintf(stderr, 
 					" Bad MaxNodes value: %s\n", option);
 				_destroy_sacctmgr_file_opts(file_opts);
 				break;
 			}
+		} else if (!strncasecmp (sub, "MaxSubmitJobs", 4)) {
+			if (get_uint(option, &file_opts->max_submit_jobs,
+				     "MaxSubmitJobs") != SLURM_SUCCESS) {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad MaxSubmitJobs value: %s\n", option);
+				_destroy_sacctmgr_file_opts(file_opts);
+				break;
+			}
 		} else if (!strncasecmp (sub, "MaxWall", 4)) {
 			mins = time_str2mins(option);
 			if (mins >= 0) {
-				file_opts->max_wall_duration_per_job 
+				file_opts->max_wall_pj 
 					= (uint32_t) mins;
 			} else if (strcmp(option, "-1")) {
-				file_opts->max_wall_duration_per_job = INFINITE;
+				file_opts->max_wall_pj = INFINITE;
 			} else {
 				exit_code=1;
 				fprintf(stderr, 
@@ -362,7 +482,12 @@ static sacctmgr_file_opts_t *_parse_options(char *options)
 		exit_code=1;
 		fprintf(stderr, " No name given\n");
 		_destroy_sacctmgr_file_opts(file_opts);
+		file_opts = NULL;
+	} else if(exit_code) {
+		_destroy_sacctmgr_file_opts(file_opts);
+		file_opts = NULL;
 	}
+
 	return file_opts;
 }
 
@@ -412,15 +537,50 @@ static List _set_up_print_fields(List format_list)
 			field->name = xstrdup("FairShare");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpCPUMins", object, 7)) {
+			field->type = PRINT_GRPCM;
+			field->name = xstrdup("GrpCPUMins");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("GrpCPUs", object, 7)) {
+			field->type = PRINT_GRPC;
+			field->name = xstrdup("GrpCPUs");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpJobs", object, 4)) {
+			field->type = PRINT_GRPJ;
+			field->name = xstrdup("GrpJobs");
+			field->len = 7;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpNodes", object, 4)) {
+			field->type = PRINT_GRPN;
+			field->name = xstrdup("GrpNodes");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpSubmitJobs", object, 4)) {
+			field->type = PRINT_GRPS;
+			field->name = xstrdup("GrpSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpWall", object, 4)) {
+			field->type = PRINT_GRPW;
+			field->name = xstrdup("GrpWall");
+			field->len = 11;
+			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("ID", object, 1)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
 			field->len = 6;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("MaxCPUSecs", object, 4)) {
-			field->type = PRINT_MAXC;
-			field->name = xstrdup("MaxCPUSecs");
+		} else if(!strncasecmp("MaxCPUMins", object, 7)) {
+			field->type = PRINT_MAXCM;
+			field->name = xstrdup("MaxCPUMins");
 			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("MaxCPUs", object, 7)) {
+			field->type = PRINT_MAXC;
+			field->name = xstrdup("MaxCPUs");
+			field->len = 8;
 			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
@@ -432,6 +592,11 @@ static List _set_up_print_fields(List format_list)
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxSubmitJobs", object, 4)) {
+			field->type = PRINT_MAXS;
+			field->name = xstrdup("MaxSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
@@ -484,7 +649,7 @@ static List _set_up_print_fields(List format_list)
 	return print_fields_list;
 }
 
-static int _print_out_assoc(List assoc_list, bool user)
+static int _print_out_assoc(List assoc_list, bool user, bool add)
 {
 	List format_list = NULL;
 	List print_fields_list = NULL;
@@ -499,10 +664,14 @@ static int _print_out_assoc(List assoc_list, bool user)
 	format_list = list_create(slurm_destroy_char);
 	if(user)
 		slurm_addto_char_list(format_list,
-				"User,Account,F,MaxC,MaxJ,MaxN,MaxW");
+				      "User,Account,F,GrpCPUM,GrpCPUs,"
+				      "GrpJ,GrpN,GrpS,GrpW,MaxCPUM,MaxCPUs,"
+				      "MaxJ,MaxS,MaxN,MaxW,QOS");
 	else 
 		slurm_addto_char_list(format_list,
-				"Account,Parent,F,MaxC,MaxJ,MaxN,MaxW");
+				      "Account,Parent,F,GrpCPUM,GrpCPUs,"
+				      "GrpJ,GrpN,GrpS,GrpW,MaxCPUM,MaxCPUs,"
+				      "MaxJ,MaxS,MaxN,MaxW,QOS");
 	
 	print_fields_list = _set_up_print_fields(format_list);
 	list_destroy(format_list);
@@ -522,10 +691,40 @@ static int _print_out_assoc(List assoc_list, bool user)
 				field->print_routine(field,
 						     assoc->fairshare);
 				break;
-			case PRINT_MAXC:
+			case PRINT_GRPCM:
+				field->print_routine(
+					field,
+					assoc->grp_cpu_mins);
+				break;
+			case PRINT_GRPC:
+				field->print_routine(field,
+						     assoc->grp_cpus);
+				break;
+			case PRINT_GRPJ:
+				field->print_routine(field, 
+						     assoc->grp_jobs);
+				break;
+			case PRINT_GRPN:
+				field->print_routine(field,
+						     assoc->grp_nodes);
+				break;
+			case PRINT_GRPS:
+				field->print_routine(field, 
+						     assoc->grp_submit_jobs);
+				break;
+			case PRINT_GRPW:
 				field->print_routine(
 					field,
-					assoc->max_cpu_secs_per_job);
+					assoc->grp_wall);
+				break;
+			case PRINT_MAXCM:
+				field->print_routine(
+					field,
+					assoc->max_cpu_mins_pj);
+				break;
+			case PRINT_MAXC:
+				field->print_routine(field,
+						     assoc->max_cpus_pj);
 				break;
 			case PRINT_MAXJ:
 				field->print_routine(field, 
@@ -533,12 +732,16 @@ static int _print_out_assoc(List assoc_list, bool user)
 				break;
 			case PRINT_MAXN:
 				field->print_routine(field,
-						     assoc->max_nodes_per_job);
+						     assoc->max_nodes_pj);
+				break;
+			case PRINT_MAXS:
+				field->print_routine(field, 
+						     assoc->max_submit_jobs);
 				break;
 			case PRINT_MAXW:
 				field->print_routine(
 					field,
-					assoc->max_wall_duration_per_job);
+					assoc->max_wall_pj);
 				break;
 			case PRINT_PARENT:
 				field->print_routine(field,
@@ -548,11 +751,19 @@ static int _print_out_assoc(List assoc_list, bool user)
 				field->print_routine(field,
 						     assoc->partition);
 				break;
+			case PRINT_QOS:
+				field->print_routine(
+					field,
+					qos_list,
+					assoc->qos_list);
+				break;
 			case PRINT_USER:
 				field->print_routine(field, 
 						     assoc->user);
 				break;
 			default:
+				field->print_routine(
+					field, NULL);
 				break;
 			}
 		}
@@ -562,8 +773,10 @@ static int _print_out_assoc(List assoc_list, bool user)
 	list_iterator_destroy(itr);
 	list_iterator_destroy(itr2);
 	list_destroy(print_fields_list);
-	rc = acct_storage_g_add_associations(db_conn, my_uid, assoc_list);
-	printf("---------------------------------------------------\n\n");
+	if(add)
+		rc = acct_storage_g_add_associations(db_conn, 
+						     my_uid, assoc_list);
+	printf("--------------------------------------------------------------\n\n");
 
 	return rc;
 }
@@ -576,75 +789,62 @@ static int _mod_cluster(sacctmgr_file_opts_t *file_opts,
 	acct_association_cond_t assoc_cond;
 	char *my_info = NULL;
 
-	memset(&mod_assoc, 0, sizeof(acct_association_rec_t));
-
-	mod_assoc.fairshare = NO_VAL;
-	mod_assoc.max_cpu_secs_per_job = NO_VAL;
-	mod_assoc.max_jobs = NO_VAL;
-	mod_assoc.max_nodes_per_job = NO_VAL;
-	mod_assoc.max_wall_duration_per_job = NO_VAL;
-
+	init_acct_association_rec(&mod_assoc);
 	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
 
-	assoc_cond.fairshare = NO_VAL;
-	assoc_cond.max_cpu_secs_per_job = NO_VAL;
-	assoc_cond.max_jobs = NO_VAL;
-	assoc_cond.max_nodes_per_job = NO_VAL;
-	assoc_cond.max_wall_duration_per_job = NO_VAL;
-
-	if(cluster->default_fairshare != file_opts->fairshare) {
+	if(cluster->root_assoc->fairshare != file_opts->fairshare) {
 		mod_assoc.fairshare = file_opts->fairshare;
 		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed fairshare", "Cluster",
 			   cluster->name,
-			   cluster->default_fairshare,
+			   cluster->root_assoc->fairshare,
 			   file_opts->fairshare); 
 	}
-	if(cluster->default_max_cpu_secs_per_job != 
-	   file_opts->max_cpu_secs_per_job) {
-		mod_assoc.max_cpu_secs_per_job = 
-			file_opts->max_cpu_secs_per_job;
+	if(cluster->root_assoc->max_cpu_mins_pj != 
+	   file_opts->max_cpu_mins_pj) {
+		mod_assoc.max_cpu_mins_pj = 
+			file_opts->max_cpu_mins_pj;
 		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed MaxCPUSecsPerJob", "Cluster",
+			   " Changed MaxCPUMinsPerJob", "Cluster",
 			   cluster->name,
-			   cluster->default_max_cpu_secs_per_job,
-			   file_opts->max_cpu_secs_per_job);
+			   cluster->root_assoc->max_cpu_mins_pj,
+			   file_opts->max_cpu_mins_pj);
 	}
-	if(cluster->default_max_jobs != file_opts->max_jobs) {
+	if(cluster->root_assoc->max_jobs != file_opts->max_jobs) {
 		mod_assoc.max_jobs = file_opts->max_jobs;
 		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed MaxJobs", "Cluster",
 			   cluster->name,
-			   cluster->default_max_jobs,
+			   cluster->root_assoc->max_jobs,
 			   file_opts->max_jobs);
 	}
-	if(cluster->default_max_nodes_per_job != file_opts->max_nodes_per_job) {
-		mod_assoc.max_nodes_per_job = file_opts->max_nodes_per_job;
+	if(cluster->root_assoc->max_nodes_pj != file_opts->max_nodes_pj) {
+		mod_assoc.max_nodes_pj = file_opts->max_nodes_pj;
 		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed MaxNodesPerJob", "Cluster",
 			   cluster->name,
-			   cluster->default_max_nodes_per_job, 
-			   file_opts->max_nodes_per_job);
+			   cluster->root_assoc->max_nodes_pj, 
+			   file_opts->max_nodes_pj);
 	}
-	if(cluster->default_max_wall_duration_per_job !=
-	   file_opts->max_wall_duration_per_job) {
-		mod_assoc.max_wall_duration_per_job =
-			file_opts->max_wall_duration_per_job;
+	if(cluster->root_assoc->max_wall_pj !=
+	   file_opts->max_wall_pj) {
+		mod_assoc.max_wall_pj =
+			file_opts->max_wall_pj;
 		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed MaxWallDurationPerJob", "Cluster",
 			   cluster->name,
-			   cluster->default_max_wall_duration_per_job,
-			   file_opts->max_wall_duration_per_job);
+			   cluster->root_assoc->max_wall_pj,
+			   file_opts->max_wall_pj);
 	}
 
 	if(changed) {
@@ -729,56 +929,6 @@ static int _mod_acct(sacctmgr_file_opts_t *file_opts,
 		changed = 1;
 	} else
 		xfree(org);
-
-	if(acct->qos_list && list_count(acct->qos_list)
-	   && file_opts->qos_list && list_count(file_opts->qos_list)) {
-		ListIterator now_qos_itr = list_iterator_create(acct->qos_list),
-			new_qos_itr = list_iterator_create(file_opts->qos_list);
-		char *now_qos = NULL, *new_qos = NULL;
-
-		if(!mod_acct.qos_list)
-			mod_acct.qos_list = list_create(slurm_destroy_char);
-		while((new_qos = list_next(new_qos_itr))) {
-			while((now_qos = list_next(now_qos_itr))) {
-				if(!strcmp(new_qos, now_qos))
-					break;
-			}
-			list_iterator_reset(now_qos_itr);
-			if(!now_qos) 
-				list_append(mod_acct.qos_list,
-					    xstrdup(new_qos));
-		}
-		list_iterator_destroy(new_qos_itr);
-		list_iterator_destroy(now_qos_itr);
-		if(mod_acct.qos_list && list_count(mod_acct.qos_list))
-			new_qos = get_qos_complete_str(qos_list,
-						       mod_acct.qos_list);
-		if(new_qos) {
-			xstrfmtcat(my_info, 
-				   " Adding QOS for account '%s' '%s'\n",
-				   acct->name,
-				   new_qos);
-			xfree(new_qos);
-			changed = 1;
-		} else {
-			list_destroy(mod_acct.qos_list);
-			mod_acct.qos_list = NULL;
-		}
-	} else if(file_opts->qos_list && list_count(file_opts->qos_list)) {
-		char *new_qos = get_qos_complete_str(qos_list,
-						     file_opts->qos_list);
-		
-		if(new_qos) {
-			xstrfmtcat(my_info, 
-				   " Adding QOS for account '%s' '%s'\n",
-				   acct->name,
-				   new_qos);
-			xfree(new_qos);
-			mod_acct.qos_list = file_opts->qos_list;
-			file_opts->qos_list = NULL;
-			changed = 1;
-		}
-	}
 									
 	if(changed) {
 		List ret_list = NULL;
@@ -795,9 +945,6 @@ static int _mod_acct(sacctmgr_file_opts_t *file_opts,
 	
 		list_destroy(assoc_cond.acct_list);
 
-		if(mod_acct.qos_list)
-			list_destroy(mod_acct.qos_list);
-
 /* 		if(ret_list && list_count(ret_list)) { */
 /* 			char *object = NULL; */
 /* 			ListIterator itr = list_iterator_create(ret_list); */
@@ -859,55 +1006,6 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 		changed = 1;
 	} else
 		xfree(def_acct);
-				
-	if(user->qos_list && list_count(user->qos_list)
-	   && file_opts->qos_list && list_count(file_opts->qos_list)) {
-		ListIterator now_qos_itr = list_iterator_create(user->qos_list),
-			new_qos_itr = list_iterator_create(file_opts->qos_list);
-		char *now_qos = NULL, *new_qos = NULL;
-
-		if(!mod_user.qos_list)
-			mod_user.qos_list = list_create(slurm_destroy_char);
-		while((new_qos = list_next(new_qos_itr))) {
-			while((now_qos = list_next(now_qos_itr))) {
-				if(!strcmp(new_qos, now_qos))
-					break;
-			}
-			list_iterator_reset(now_qos_itr);
-			if(!now_qos) 
-				list_append(mod_user.qos_list,
-					    xstrdup(new_qos));
-		}
-		list_iterator_destroy(new_qos_itr);
-		list_iterator_destroy(now_qos_itr);
-		if(mod_user.qos_list && list_count(mod_user.qos_list))
-			new_qos = get_qos_complete_str(qos_list,
-						       mod_user.qos_list);
-		if(new_qos) {
-			xstrfmtcat(my_info, 
-				   " Adding QOS for user '%s' '%s'\n",
-				   user->name,
-				   new_qos);
-			xfree(new_qos);
-			changed = 1;
-		} else 
-			list_destroy(mod_user.qos_list);
-
-	} else if(file_opts->qos_list && list_count(file_opts->qos_list)) {
-		char *new_qos = get_qos_complete_str(qos_list,
-						     file_opts->qos_list);
-		
-		if(new_qos) {
-			xstrfmtcat(my_info, 
-				   " Adding QOS for user '%s' '%s'\n",
-				   user->name,
-				   new_qos);
-			xfree(new_qos);
-			mod_user.qos_list = file_opts->qos_list;
-			file_opts->qos_list = NULL;
-			changed = 1;
-		}
-	}
 									
 	if(user->admin_level != ACCT_ADMIN_NOTSET
 	   && file_opts->admin != ACCT_ADMIN_NOTSET
@@ -931,9 +1029,6 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 			&user_cond, 
 			&mod_user);
 		notice_thread_fini();
-
-		if(mod_user.qos_list)
-			list_destroy(mod_user.qos_list);
 					
 /* 		if(ret_list && list_count(ret_list)) { */
 /* 			char *object = NULL; */
@@ -1063,23 +1158,9 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 		return 0;
 		break;
 	}
-
-	memset(&mod_assoc, 0, sizeof(acct_association_rec_t));
-
-	mod_assoc.fairshare = NO_VAL;
-	mod_assoc.max_cpu_secs_per_job = NO_VAL;
-	mod_assoc.max_jobs = NO_VAL;
-	mod_assoc.max_nodes_per_job = NO_VAL;
-	mod_assoc.max_wall_duration_per_job = NO_VAL;
-
+	init_acct_association_rec(&mod_assoc);
 	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
 
-	assoc_cond.fairshare = NO_VAL;
-	assoc_cond.max_cpu_secs_per_job = NO_VAL;
-	assoc_cond.max_jobs = NO_VAL;
-	assoc_cond.max_nodes_per_job = NO_VAL;
-	assoc_cond.max_wall_duration_per_job = NO_VAL;
-
 	if(assoc->fairshare != file_opts->fairshare) {
 		mod_assoc.fairshare = file_opts->fairshare;
 		changed = 1;
@@ -1090,17 +1171,96 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 			   assoc->fairshare,
 			   file_opts->fairshare);
 	}
-	if(assoc->max_cpu_secs_per_job != file_opts->max_cpu_secs_per_job) {
-		mod_assoc.max_cpu_secs_per_job =
-			file_opts->max_cpu_secs_per_job;
+
+	if(assoc->grp_cpu_mins != file_opts->grp_cpu_mins) {
+		mod_assoc.grp_cpu_mins = file_opts->grp_cpu_mins;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8llu -> %llu\n",
+			   " Changed GrpCPUMins",
+			   type, name,
+			   assoc->grp_cpu_mins,
+			   file_opts->grp_cpu_mins);
+	}
+
+	if(assoc->grp_cpus != file_opts->grp_cpus) {
+		mod_assoc.grp_cpus = file_opts->grp_cpus;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpCpus",
+			   type, name,
+			   assoc->grp_cpus, 
+			   file_opts->grp_cpus);
+	}
+
+	if(assoc->grp_jobs != file_opts->grp_jobs) {
+		mod_assoc.grp_jobs = file_opts->grp_jobs;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpJobs",
+			   type, name,
+			   assoc->grp_jobs,
+			   file_opts->grp_jobs);
+	}
+
+	if(assoc->grp_nodes != file_opts->grp_nodes) {
+		mod_assoc.grp_nodes = file_opts->grp_nodes;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpNodes",
+			   type, name,
+			   assoc->grp_nodes, 
+			   file_opts->grp_nodes);
+	}
+
+	if(assoc->grp_submit_jobs != file_opts->grp_submit_jobs) {
+		mod_assoc.grp_submit_jobs = file_opts->grp_submit_jobs;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpSubmitJobs",
+			   type, name,
+			   assoc->grp_submit_jobs,
+			   file_opts->grp_submit_jobs);
+	}
+
+	if(assoc->grp_wall != file_opts->grp_wall) {
+		mod_assoc.grp_wall = file_opts->grp_wall;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpWallDuration",
+			   type, name,
+			   assoc->grp_wall,
+			   file_opts->grp_wall);
+	}
+
+	if(assoc->max_cpu_mins_pj != file_opts->max_cpu_mins_pj) {
+		mod_assoc.max_cpu_mins_pj =
+			file_opts->max_cpu_mins_pj;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8llu -> %llu\n",
+			   " Changed MaxCPUMinsPerJob",
+			   type, name,
+			   assoc->max_cpu_mins_pj,
+			   file_opts->max_cpu_mins_pj);
+	}
+
+	if(assoc->max_cpus_pj != file_opts->max_cpus_pj) {
+		mod_assoc.max_cpus_pj = file_opts->max_cpus_pj;
 		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed MaxCPUSecsPerJob",
+			   " Changed MaxCpusPerJob",
 			   type, name,
-			   assoc->max_cpu_secs_per_job,
-			   file_opts->max_cpu_secs_per_job);
+			   assoc->max_cpus_pj, 
+			   file_opts->max_cpus_pj);
 	}
+
 	if(assoc->max_jobs != file_opts->max_jobs) {
 		mod_assoc.max_jobs = file_opts->max_jobs;
 		changed = 1;
@@ -1111,27 +1271,91 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 			   assoc->max_jobs,
 			   file_opts->max_jobs);
 	}
-	if(assoc->max_nodes_per_job != file_opts->max_nodes_per_job) {
-		mod_assoc.max_nodes_per_job = file_opts->max_nodes_per_job;
+
+	if(assoc->max_nodes_pj != file_opts->max_nodes_pj) {
+		mod_assoc.max_nodes_pj = file_opts->max_nodes_pj;
 		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed MaxNodesPerJob",
 			   type, name,
-			   assoc->max_nodes_per_job, 
-			   file_opts->max_nodes_per_job);
+			   assoc->max_nodes_pj, 
+			   file_opts->max_nodes_pj);
 	}
-	if(assoc->max_wall_duration_per_job !=
-	   file_opts->max_wall_duration_per_job) {
-		mod_assoc.max_wall_duration_per_job =
-			file_opts->max_wall_duration_per_job;
+
+	if(assoc->max_submit_jobs != file_opts->max_submit_jobs) {
+		mod_assoc.max_submit_jobs = file_opts->max_submit_jobs;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed MaxSubmitJobs",
+			   type, name,
+			   assoc->max_submit_jobs,
+			   file_opts->max_submit_jobs);
+	}
+
+	if(assoc->max_wall_pj != file_opts->max_wall_pj) {
+		mod_assoc.max_wall_pj =	file_opts->max_wall_pj;
 		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed MaxWallDurationPerJob",
 			   type, name,
-			   assoc->max_wall_duration_per_job,
-			   file_opts->max_wall_duration_per_job);
+			   assoc->max_wall_pj,
+			   file_opts->max_wall_pj);
+	}
+
+	if(assoc->qos_list && list_count(assoc->qos_list)
+	   && file_opts->qos_list && list_count(file_opts->qos_list)) {
+		ListIterator now_qos_itr =
+			list_iterator_create(assoc->qos_list),
+			new_qos_itr = list_iterator_create(file_opts->qos_list);
+		char *now_qos = NULL, *new_qos = NULL;
+
+		if(!mod_assoc.qos_list)
+			mod_assoc.qos_list = list_create(slurm_destroy_char);
+		while((new_qos = list_next(new_qos_itr))) {
+			while((now_qos = list_next(now_qos_itr))) {
+				if(!strcmp(new_qos, now_qos))
+					break;
+			}
+			list_iterator_reset(now_qos_itr);
+			if(!now_qos) 
+				list_append(mod_assoc.qos_list,
+					    xstrdup(new_qos));
+		}
+		list_iterator_destroy(new_qos_itr);
+		list_iterator_destroy(now_qos_itr);
+		if(mod_assoc.qos_list && list_count(mod_assoc.qos_list))
+			new_qos = get_qos_complete_str(qos_list,
+						       mod_assoc.qos_list);
+		if(new_qos) {
+			xstrfmtcat(my_info, 
+				   "%-30.30s for %-7.7s %-10.10s %8s\n",
+				   " Added QOS",
+				   type, name,
+				   new_qos);
+			xfree(new_qos);
+			changed = 1;
+		} else {
+			list_destroy(mod_assoc.qos_list);
+			mod_assoc.qos_list = NULL;
+		}
+	} else if(file_opts->qos_list && list_count(file_opts->qos_list)) {
+		char *new_qos = get_qos_complete_str(qos_list,
+						     file_opts->qos_list);
+		
+		if(new_qos) {
+			xstrfmtcat(my_info, 
+				   "%-30.30s for %-7.7s %-10.10s %8s\n",
+				   " Added QOS",
+				   type, name,
+				   new_qos);
+			xfree(new_qos);
+			mod_assoc.qos_list = file_opts->qos_list;
+			file_opts->qos_list = NULL;
+			changed = 1;
+		}
 	}
 
 	if(changed) {
@@ -1162,6 +1386,9 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 			&mod_assoc);
 		notice_thread_fini();
 					
+		if(mod_assoc.qos_list)
+			list_destroy(mod_assoc.qos_list);
+
 		list_destroy(assoc_cond.cluster_list);
 		list_destroy(assoc_cond.acct_list);
 		if(assoc_cond.user_list)
@@ -1203,8 +1430,6 @@ static acct_user_rec_t *_set_user_up(sacctmgr_file_opts_t *file_opts,
 	else
 		user->default_acct = xstrdup(parent);
 	
-	user->qos_list = file_opts->qos_list;
-	file_opts->qos_list = NULL;
 	user->admin_level = file_opts->admin;
 	
 	if(file_opts->coord_list) {
@@ -1259,12 +1484,74 @@ static acct_account_rec_t *_set_acct_up(sacctmgr_file_opts_t *file_opts,
 	/* info("adding acct %s (%s) (%s)", */
 /* 	        acct->name, acct->description, */
 /* 		acct->organization); */
-	acct->qos_list = file_opts->qos_list;
-	file_opts->qos_list = NULL;
 
 	return acct;
 }
 
+static acct_association_rec_t *_set_assoc_up(sacctmgr_file_opts_t *file_opts,
+					     sacctmgr_mod_type_t mod_type,
+					     char *cluster, char *parent)
+{
+	acct_association_rec_t *assoc = NULL;
+
+	if(!cluster) {
+		error("No cluster name was given for _set_assoc_up");
+		return NULL;
+	}
+
+	if(!parent && (mod_type != MOD_CLUSTER)) {
+		error("No parent was given for _set_assoc_up");
+		return NULL;
+	}
+
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	init_acct_association_rec(assoc);
+
+	switch(mod_type) {
+	case MOD_CLUSTER:
+		assoc->acct = xstrdup(parent);
+		assoc->cluster = xstrdup(cluster);
+		break;
+	case MOD_ACCT:
+		assoc->acct = xstrdup(file_opts->name);
+		assoc->cluster = xstrdup(cluster);
+		assoc->parent_acct = xstrdup(parent);
+		break;
+	case MOD_USER:
+		assoc->acct = xstrdup(parent);
+		assoc->cluster = xstrdup(cluster);
+		assoc->partition = xstrdup(file_opts->part);
+		assoc->user = xstrdup(file_opts->name);
+		break;
+	default:
+		error("Unknown mod type for _set_assoc_up %d", mod_type);
+		destroy_acct_association_rec(assoc);
+		assoc = NULL;
+		break;
+	}
+
+	
+	assoc->fairshare = file_opts->fairshare;
+	
+	assoc->grp_cpu_mins = file_opts->grp_cpu_mins;
+	assoc->grp_cpus = file_opts->grp_cpus;
+	assoc->grp_jobs = file_opts->grp_jobs;
+	assoc->grp_nodes = file_opts->grp_nodes;
+	assoc->grp_submit_jobs = file_opts->grp_submit_jobs;
+	assoc->grp_wall = file_opts->grp_wall;
+	
+	assoc->max_jobs = file_opts->max_jobs;
+	assoc->max_nodes_pj = file_opts->max_nodes_pj;
+	assoc->max_wall_pj = file_opts->max_wall_pj;
+	assoc->max_cpu_mins_pj = file_opts->max_cpu_mins_pj;
+
+	if(file_opts->qos_list && list_count(file_opts->qos_list)) 
+		assoc->qos_list = copy_char_list(file_opts->qos_list);
+
+
+	return assoc;
+}
+
 static int _print_file_sacctmgr_assoc_childern(FILE *fd, 
 					       List sacctmgr_assoc_list,
 					       List user_list,
@@ -1283,6 +1570,9 @@ static int _print_file_sacctmgr_assoc_childern(FILE *fd,
 				user_list, sacctmgr_assoc->assoc->user);
 			line = xstrdup_printf(
 				"User - %s", sacctmgr_assoc->sort_name);
+			if(sacctmgr_assoc->assoc->partition) 
+				xstrfmtcat(line, ":Partition='%s'", 
+					   sacctmgr_assoc->assoc->partition);
 			if(user_rec) {
 				xstrfmtcat(line, ":DefaultAccount='%s'",
 					   user_rec->default_acct);
@@ -1291,22 +1581,6 @@ static int _print_file_sacctmgr_assoc_childern(FILE *fd,
 						   acct_admin_level_str(
 							   user_rec->
 							   admin_level));
-				if(user_rec->qos_list 
-				   && list_count(user_rec->qos_list)) {
-					char *temp_char = NULL;
-					if(!qos_list) {
-						qos_list = 
-							acct_storage_g_get_qos(
-								db_conn, my_uid,
-								NULL);
-					}
-					temp_char = get_qos_complete_str(
-						qos_list, user_rec->qos_list);
-					xstrfmtcat(line, ":QOS='%s'",
-						   temp_char);
-					xfree(temp_char);
-				}
-
 				if(user_rec->coord_accts
 				   && list_count(user_rec->coord_accts)) {
 					ListIterator itr2 = NULL;
@@ -1349,43 +1623,10 @@ static int _print_file_sacctmgr_assoc_childern(FILE *fd,
 					   acct_rec->description);
 				xstrfmtcat(line, ":Organization='%s'",
 					   acct_rec->organization);
-				if(acct_rec->qos_list) {
-					char *temp_char = get_qos_complete_str(
-						qos_list, acct_rec->qos_list);
-					if(temp_char) {			
-						xstrfmtcat(line, ":QOS='%s'",
-							   temp_char);
-						xfree(temp_char);
-					}
-				}
 			}
 		}
-		if(sacctmgr_assoc->assoc->partition) 
-			xstrfmtcat(line, ":Partition='%s'", 
-				   sacctmgr_assoc->assoc->partition);
 			
-		if(sacctmgr_assoc->assoc->fairshare != INFINITE)
-			xstrfmtcat(line, ":Fairshare=%u", 
-				   sacctmgr_assoc->assoc->fairshare);
-		
-		if(sacctmgr_assoc->assoc->max_cpu_secs_per_job != INFINITE)
-			xstrfmtcat(line, ":MaxCPUSecs=%u",
-				   sacctmgr_assoc->assoc->max_cpu_secs_per_job);
-		
-		if(sacctmgr_assoc->assoc->max_jobs != INFINITE) 
-			xstrfmtcat(line, ":MaxJobs=%u",
-				   sacctmgr_assoc->assoc->max_jobs);
-		
-		if(sacctmgr_assoc->assoc->max_nodes_per_job != INFINITE)
-			xstrfmtcat(line, ":MaxNodes=%u",
-				   sacctmgr_assoc->assoc->max_nodes_per_job);
-		
-		if(sacctmgr_assoc->assoc->max_wall_duration_per_job 
-		   != INFINITE)
- 			xstrfmtcat(line, ":MaxWallDurationPerJob=%u",
-				   sacctmgr_assoc->assoc->
-				   max_wall_duration_per_job);
-
+		print_file_add_limits_to_line(&line, sacctmgr_assoc->assoc);
 
 		if(fprintf(fd, "%s\n", line) < 0) {
 			exit_code=1;
@@ -1401,6 +1642,72 @@ static int _print_file_sacctmgr_assoc_childern(FILE *fd,
 	return SLURM_SUCCESS;
 }
 
+extern int print_file_add_limits_to_line(char **line,
+					 acct_association_rec_t *assoc)
+{
+	static List qos_list = NULL; /* This is a leak, since we never
+				      * free it, but we don't
+				      * really care since this isn't a
+				      * daemon.
+				      */
+	if(!assoc)
+		return SLURM_ERROR;
+
+	if(assoc->fairshare != INFINITE)
+		xstrfmtcat(*line, ":Fairshare=%u", assoc->fairshare);
+		
+	if(assoc->grp_cpu_mins != INFINITE)
+		xstrfmtcat(*line, ":GrpCPUMins=%llu", assoc->grp_cpu_mins);
+		
+	if(assoc->grp_cpus != INFINITE)
+		xstrfmtcat(*line, ":GrpCPUs=%u", assoc->grp_cpus);
+		
+	if(assoc->grp_jobs != INFINITE) 
+		xstrfmtcat(*line, ":GrpJobs=%u", assoc->grp_jobs);
+		
+	if(assoc->grp_nodes != INFINITE)
+		xstrfmtcat(*line, ":GrpNodes=%u", assoc->grp_nodes);
+		
+	if(assoc->grp_submit_jobs != INFINITE) 
+		xstrfmtcat(*line, ":GrpSubmitJobs=%u", assoc->grp_submit_jobs);
+		
+	if(assoc->grp_wall != INFINITE)
+		xstrfmtcat(*line, ":GrpWall=%u", assoc->grp_wall);
+
+	if(assoc->max_cpu_mins_pj != INFINITE)
+		xstrfmtcat(*line, ":MaxCPUMins=%llu", assoc->max_cpu_mins_pj);
+		
+	if(assoc->max_cpus_pj != INFINITE)
+		xstrfmtcat(*line, ":MaxCPUs=%u", assoc->max_cpus_pj);
+		
+	if(assoc->max_jobs != INFINITE) 
+		xstrfmtcat(*line, ":MaxJobs=%u", assoc->max_jobs);
+		
+	if(assoc->max_nodes_pj != INFINITE)
+		xstrfmtcat(*line, ":MaxNodes=%u", assoc->max_nodes_pj);
+		
+	if(assoc->max_submit_jobs != INFINITE) 
+		xstrfmtcat(*line, ":MaxSubmitJobs=%u", assoc->max_submit_jobs);
+		
+	if(assoc->max_wall_pj != INFINITE)
+		xstrfmtcat(*line, ":MaxWallDurationPerJob=%u",
+			   assoc->max_wall_pj);
+
+	if(assoc->qos_list && list_count(assoc->qos_list)) {
+		char *temp_char = NULL;
+		if(!qos_list) 
+			qos_list = acct_storage_g_get_qos(
+				db_conn, my_uid, NULL);
+		
+		temp_char = get_qos_complete_str(qos_list, assoc->qos_list);
+		xstrfmtcat(*line, ":QOS='%s'", temp_char);
+		xfree(temp_char);
+	}
+
+	return SLURM_SUCCESS;
+}
+
+
 extern int print_file_sacctmgr_assoc_list(FILE *fd, 
 					  List sacctmgr_assoc_list,
 					  List user_list,
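
For reference, print_file_add_limits_to_line() above appends every limit that is not INFINITE as a colon-separated Key=Value pair onto the dump-file line started by its caller. An illustrative user line built this way might read as follows (the key names come from the xstrfmtcat() calls above; the user, account, and numeric values are made up):

    User - bob:DefaultAccount='physics':Fairshare=10:GrpJobs=20:MaxJobs=5:MaxWallDurationPerJob=1440:QOS='normal'
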
@@ -1505,7 +1812,8 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 		return;
 		
 	} else {
-		if(user->admin_level < ACCT_ADMIN_SUPER_USER) {
+		if(my_uid != slurm_get_slurm_user_id() && my_uid != 0
+		   && user->admin_level < ACCT_ADMIN_SUPER_USER) {
 			exit_code=1;
 			fprintf(stderr, " Your user does not have sufficient "
 				"privileges to load files.\n");
@@ -1601,13 +1909,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				break;
 			} 
 		}
-		if(!object[0]) {
-			exit_code=1;
-			fprintf(stderr, " Misformatted line(%d): %s\n",
-				lc, line);
-			rc = SLURM_ERROR;
-			break;
-		} 
+		if(!object[0]) 
+			continue;
+		
 		while(line[start] != ' ' && start<len)
 			start++;
 		if(start>=len) {
@@ -1685,21 +1989,23 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 
 			if(!(cluster = sacctmgr_find_cluster_from_list(
 				     curr_cluster_list, cluster_name))) {
+				List temp_assoc_list = list_create(NULL);
 				List cluster_list =
 					list_create(destroy_acct_cluster_rec);
+
 				cluster = xmalloc(sizeof(acct_cluster_rec_t));
 				list_append(cluster_list, cluster);
 				cluster->name = xstrdup(cluster_name);
-				cluster->default_fairshare =
-					file_opts->fairshare;		
-				cluster->default_max_cpu_secs_per_job = 
-					file_opts->max_cpu_secs_per_job;
-				cluster->default_max_jobs = file_opts->max_jobs;
-				cluster->default_max_nodes_per_job = 
-					file_opts->max_nodes_per_job;
-				cluster->default_max_wall_duration_per_job = 
-					file_opts->max_wall_duration_per_job;
+				cluster->root_assoc = _set_assoc_up(
+					file_opts, MOD_CLUSTER,
+					cluster_name, "root");
+				list_append(temp_assoc_list,
+					    cluster->root_assoc);
+				
+				rc = _print_out_assoc(temp_assoc_list, 0, 0);
+				list_destroy(temp_assoc_list);
 				notice_thread_init();
+				
 				rc = acct_storage_g_add_clusters(
 					db_conn, my_uid, cluster_list);
 				notice_thread_fini();
@@ -1722,6 +2028,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 			
 			memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
 			assoc_cond.cluster_list = list_create(NULL);
+			assoc_cond.with_raw_qos = 1;
 			assoc_cond.without_parent_limits = 1;
 			list_append(assoc_cond.cluster_list, cluster_name);
 			curr_assoc_list = acct_storage_g_get_associations(
@@ -1801,18 +2108,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				/* don't add anything to the
 				   curr_acct_list */
 
-				assoc = xmalloc(sizeof(acct_association_rec_t));
-				assoc->acct = xstrdup(file_opts->name);
-				assoc->cluster = xstrdup(cluster_name);
-				assoc->parent_acct = xstrdup(parent);
-				assoc->fairshare = file_opts->fairshare;
-				assoc->max_jobs = file_opts->max_jobs;
-				assoc->max_nodes_per_job =
-					file_opts->max_nodes_per_job;
-				assoc->max_wall_duration_per_job =
-					file_opts->max_wall_duration_per_job;
-				assoc->max_cpu_secs_per_job = 
-					file_opts->max_cpu_secs_per_job;
+				assoc = _set_assoc_up(file_opts, MOD_ACCT,
+						      cluster_name, parent);
+
 				list_append(acct_assoc_list, assoc);
 				/* don't add anything to the
 				   curr_assoc_list */
@@ -1837,18 +2135,10 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				} else {
 					debug2("already modified this account");
 				}
-				assoc = xmalloc(sizeof(acct_association_rec_t));
-				assoc->acct = xstrdup(file_opts->name);
-				assoc->cluster = xstrdup(cluster_name);
-				assoc->parent_acct = xstrdup(parent);
-				assoc->fairshare = file_opts->fairshare;
-				assoc->max_jobs = file_opts->max_jobs;
-				assoc->max_nodes_per_job =
-					file_opts->max_nodes_per_job;
-				assoc->max_wall_duration_per_job =
-					file_opts->max_wall_duration_per_job;
-				assoc->max_cpu_secs_per_job = 
-					file_opts->max_cpu_secs_per_job;
+
+				assoc = _set_assoc_up(file_opts, MOD_ACCT,
+						      cluster_name, parent);
+
 				list_append(acct_assoc_list, assoc);
 				/* don't add anything to the
 				   curr_assoc_list */
@@ -1875,6 +2165,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				if(!assoc2) {
 					assoc2 = xmalloc(
 						sizeof(acct_association_rec_t));
+					init_acct_association_rec(assoc2);
 					list_append(mod_assoc_list, assoc2);
 					assoc2->cluster = xstrdup(cluster_name);
 					assoc2->acct = xstrdup(file_opts->name);
@@ -1907,20 +2198,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				/* don't add anything to the
 				   curr_user_list */
 
-				assoc = xmalloc(sizeof(acct_association_rec_t));
-				assoc->acct = xstrdup(parent);
-				assoc->cluster = xstrdup(cluster_name);
-				assoc->fairshare = file_opts->fairshare;
-				assoc->max_jobs = file_opts->max_jobs;
-				assoc->max_nodes_per_job =
-					file_opts->max_nodes_per_job;
-				assoc->max_wall_duration_per_job =
-					file_opts->max_wall_duration_per_job;
-				assoc->max_cpu_secs_per_job = 
-					file_opts->max_cpu_secs_per_job;
-				assoc->partition = xstrdup(file_opts->part);
-				assoc->user = xstrdup(file_opts->name);
-				
+				assoc = _set_assoc_up(file_opts, MOD_USER,
+						      cluster_name, parent);
+
 				list_append(user_assoc_list, assoc);
 				/* don't add anything to the
 				   curr_assoc_list */
@@ -1957,20 +2237,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 					debug2("already modified this user");
 				}
 			new_association:
-				assoc = xmalloc(sizeof(acct_association_rec_t));
-				assoc->acct = xstrdup(parent);
-				assoc->cluster = xstrdup(cluster_name);
-				assoc->fairshare = file_opts->fairshare;
-				assoc->max_jobs = file_opts->max_jobs;
-				assoc->max_nodes_per_job =
-					file_opts->max_nodes_per_job;
-				assoc->max_wall_duration_per_job =
-					file_opts->max_wall_duration_per_job;
-				assoc->max_cpu_secs_per_job = 
-					file_opts->max_cpu_secs_per_job;
-				assoc->partition = xstrdup(file_opts->part);
-				assoc->user = xstrdup(file_opts->name);
-				
+				assoc = _set_assoc_up(file_opts, MOD_USER,
+						      cluster_name, parent);
+
 				list_append(user_assoc_list, assoc);
 				/* don't add anything to the
 				   curr_assoc_list */
@@ -1997,6 +2266,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				if(!assoc2) {
 					assoc2 = xmalloc(
 						sizeof(acct_association_rec_t));
+					init_acct_association_rec(assoc2);
 					list_append(mod_assoc_list, assoc2);
 					assoc2->cluster = xstrdup(cluster_name);
 					assoc2->acct = xstrdup(parent);
@@ -2053,13 +2323,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 					field->print_routine(
 						field, acct->organization);
 					break;
-				case PRINT_QOS:
-					field->print_routine(
-						field,
-						qos_list,
-						acct->qos_list);
-					break;
 				default:
+					field->print_routine(
+						field, NULL);
 					break;
 				}
 			}
@@ -2077,7 +2343,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	
 	if(rc == SLURM_SUCCESS && list_count(acct_assoc_list)) {
 		printf("Account Associations\n");
-		_print_out_assoc(acct_assoc_list, 0);
+		rc = _print_out_assoc(acct_assoc_list, 0, 1);
 		set = 1;
 	}
 	if(rc == SLURM_SUCCESS && list_count(user_list)) {
@@ -2115,13 +2381,9 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 					field->print_routine(
 						field, user->name);
 					break;
-				case PRINT_QOS:
-					field->print_routine(
-						field,
-						qos_list,
-						user->qos_list);
-					break;
 				default:
+					field->print_routine(
+						field, NULL);
 					break;
 				}
 			}
@@ -2140,7 +2402,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	
 	if(rc == SLURM_SUCCESS && list_count(user_assoc_list)) {
 		printf("User Associations\n");
-		_print_out_assoc(user_assoc_list, 1);
+		rc = _print_out_assoc(user_assoc_list, 1, 1);
 		set = 1;
 	}
 	END_TIMER2("add cluster");
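
A note on the reworked _print_out_assoc(): it now takes (assoc_list, user, add) and only calls acct_storage_g_add_associations() when the new add flag is set, so the same routine can both preview the cluster's root association and actually load account/user associations. The call sites in this file, summarized for convenience:

    _print_out_assoc(temp_assoc_list, 0, 0);  /* preview the root association, do not store it */
    _print_out_assoc(acct_assoc_list, 0, 1);  /* print account associations, then add them     */
    _print_out_assoc(user_assoc_list, 1, 1);  /* print user associations, then add them        */
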
diff --git a/src/sacctmgr/qos_functions.c b/src/sacctmgr/qos_functions.c
index f455f1d25..7177594da 100644
--- a/src/sacctmgr/qos_functions.c
+++ b/src/sacctmgr/qos_functions.c
@@ -98,68 +98,207 @@ static int _set_cond(int *start, int argc, char *argv[],
 	return set;
 }
 
-/* static int _set_rec(int *start, int argc, char *argv[], */
-/* 		    acct_qos_rec_t *qos) */
-/* { */
-/* 	int i; */
-/* 	int set = 0; */
-/* 	int end = 0; */
-
-/* 	for (i=(*start); i<argc; i++) { */
-/* 		end = parse_option_end(argv[i]); */
-/* 		if (!strncasecmp (argv[i], "Where", 5)) { */
-/* 			i--; */
-/* 			break; */
-/* 		} else if(!end && !strncasecmp(argv[i], "set", 3)) { */
-/* 			continue; */
-/* 		} else if(!end) { */
-/* 			printf(" Bad format on %s: End your option with " */
-/* 			       "an '=' sign\n", argv[i]); */
-/* 		} else if (!strncasecmp (argv[i], "Description", 1)) { */
-/* 			if(!qos->description) */
-/* 				qos->description = */
-/* 					strip_quotes(argv[i]+end, NULL); */
-/* 			set = 1; */
-/* 		} else if (!strncasecmp (argv[i], "Name", 1)) { */
-/* 			if(!qos->name) */
-/* 				qos->name = strip_quotes(argv[i]+end, NULL); */
-/* 			set = 1; */
-/* 		} else { */
-/* 			printf(" Unknown option: %s\n" */
-/* 			       " Use keyword 'where' to modify condition\n", */
-/* 			       argv[i]); */
-/* 		} */
-/* 	} */
-
-/* 	(*start) = i; */
-
-/* 	return set; */
-/* } */
+static int _set_rec(int *start, int argc, char *argv[],
+		    List qos_list,
+		    acct_qos_rec_t *qos)
+{
+	int i, mins;
+	int set = 0;
+	int end = 0;
+
+	for (i=(*start); i<argc; i++) {
+		end = parse_option_end(argv[i]);
+		if (!strncasecmp (argv[i], "Where", 5)) {
+			i--;
+			break;
+		} else if(!end && !strncasecmp(argv[i], "set", 3)) {
+			continue;
+		} else if(!end
+			  || !strncasecmp (argv[i], "Name", 1)) {
+			if(qos_list) 
+				slurm_addto_char_list(qos_list, argv[i]+end);
+		} else if (!strncasecmp (argv[i], "Description", 1)) {
+			if(!qos->description)
+				qos->description =
+					strip_quotes(argv[i]+end, NULL);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "JobFlags", 1)) {
+			if(!qos->job_flags)
+				qos->job_flags =
+					strip_quotes(argv[i]+end, NULL);
+			set = 1;			
+		} else if (!strncasecmp (argv[i], "GrpCPUMins", 7)) {
+			if(!qos)
+				continue;
+			if (get_uint64(argv[i]+end, 
+				       &qos->grp_cpu_mins, 
+				       "GrpCPUMins") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCpus", 7)) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->grp_cpus,
+			    "GrpCpus") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpJobs", 4)) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->grp_jobs,
+			    "GrpJobs") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpNodes", 4)) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->grp_nodes,
+			    "GrpNodes") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpSubmitJobs", 4)) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->grp_submit_jobs,
+			    "GrpSubmitJobs") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpWall", 4)) {
+			if(!qos)
+				continue;
+			mins = time_str2mins(argv[i]+end);
+			if (mins != NO_VAL) {
+				qos->grp_wall	= (uint32_t) mins;
+				set = 1;
+			} else {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpWall time format: %s\n", 
+					argv[i]);
+			}
+		} else if (!strncasecmp (argv[i], "MaxCPUMins", 7)) {
+			if(!qos)
+				continue;
+			if (get_uint64(argv[i]+end, 
+				       &qos->max_cpu_mins_pu, 
+				       "MaxCPUMins") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCpus", 7)) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->max_cpus_pu,
+			    "MaxCpus") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->max_jobs_pu,
+			    "MaxJobs") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, 
+			    &qos->max_nodes_pu,
+			    "MaxNodes") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobs", 4)) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->max_submit_jobs_pu,
+			    "MaxSubmitJobs") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
+			if(!qos)
+				continue;
+			mins = time_str2mins(argv[i]+end);
+			if (mins != NO_VAL) {
+				qos->max_wall_pu = (uint32_t) mins;
+				set = 1;
+			} else {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad MaxWall time format: %s\n", 
+					argv[i]);
+			}
+		} else if (!strncasecmp (argv[i], "Preemptee", 9)) {
+			int option = 0;
+			if(!qos)
+				continue;
+
+			if(!qos->preemptee_list) 
+				qos->preemptee_list = 
+					list_create(slurm_destroy_char);
+						
+			if(!qos_list) 
+				qos_list = acct_storage_g_get_qos(
+					db_conn, my_uid, NULL);
+						
+			if(end > 2 && argv[i][end-1] == '='
+			   && (argv[i][end-2] == '+' 
+			       || argv[i][end-2] == '-'))
+				option = (int)argv[i][end-2];
+
+			if(addto_qos_char_list(qos->preemptee_list,
+					       qos_list, argv[i]+end, option))
+				set = 1;
+			else
+				exit_code = 1;
+		} else if (!strncasecmp (argv[i], "Preemptor", 9)) {
+			int option = 0;
+			if(!qos)
+				continue;
+
+			if(!qos->preemptor_list) 
+				qos->preemptor_list = 
+					list_create(slurm_destroy_char);
+						
+			if(!qos_list) 
+				qos_list = acct_storage_g_get_qos(
+					db_conn, my_uid, NULL);
+						
+			if(end > 2 && argv[i][end-1] == '='
+			   && (argv[i][end-2] == '+' 
+			       || argv[i][end-2] == '-'))
+				option = (int)argv[i][end-2];
+
+			if(addto_qos_char_list(qos->preemptor_list,
+					       qos_list, argv[i]+end, option))
+				set = 1;
+			else
+				exit_code = 1;
+		} else if (!strncasecmp (argv[i], "Priority", 3)) {
+			if(!qos)
+				continue;
+			
+			if (get_uint(argv[i]+end, &qos->priority,
+			    "Priority") == SLURM_SUCCESS)
+				set = 1;
+		} else {
+			printf(" Unknown option: %s\n"
+			       " Use keyword 'where' to modify condition\n",
+			       argv[i]);
+		}
+	}
+
+	(*start) = i;
+
+	return set;
+}
 
 extern int sacctmgr_add_qos(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
-	int i=0;
+	int i=0, limit_set=0;
 	ListIterator itr = NULL;
 	acct_qos_rec_t *qos = NULL;
+	acct_qos_rec_t *start_qos = xmalloc(sizeof(acct_qos_rec_t));
 	List name_list = list_create(slurm_destroy_char);
 	char *description = NULL;
 	char *name = NULL;
 	List qos_list = NULL;
 	List local_qos_list = NULL;
 	char *qos_str = NULL;
-	
-	for (i=0; i<argc; i++) {
-		int end = parse_option_end(argv[i]);
-		if(!end || !strncasecmp (argv[i], "Names", 1)) {
-			slurm_addto_char_list(name_list, argv[i]+end);
-		} else if (!strncasecmp (argv[i], "Description", 1)) {
-			description = strip_quotes(argv[i]+end, NULL);
-		} else {
-			exit_code=1;
-			fprintf(stderr, " Unknown option: %s\n", argv[i]);
-		}		
-	}
+
+	init_acct_qos_rec(start_qos);
+
+	for (i=0; i<argc; i++) 
+		limit_set = _set_rec(&i, argc, argv, name_list, start_qos);
 
 	if(exit_code) {
 		list_destroy(name_list);
@@ -167,7 +306,7 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 		return SLURM_ERROR;
 	} else if(!list_count(name_list)) {
 		list_destroy(name_list);
-		xfree(description);
+		destroy_acct_qos_rec(start_qos);
 		exit_code=1;
 		fprintf(stderr, " Need name of qos to add.\n"); 
 		return SLURM_SUCCESS;
@@ -193,11 +332,31 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 		if(!sacctmgr_find_qos_from_list(local_qos_list, name)) {
 			qos = xmalloc(sizeof(acct_qos_rec_t));
 			qos->name = xstrdup(name);
-			if(description) 
-				qos->description = xstrdup(description);
+			if(start_qos->description) 
+				qos->description =
+					xstrdup(start_qos->description);
 			else
 				qos->description = xstrdup(name);
 
+			qos->grp_cpu_mins = start_qos->grp_cpu_mins;
+			qos->grp_cpus = start_qos->grp_cpus;
+			qos->grp_jobs = start_qos->grp_jobs;
+			qos->grp_nodes = start_qos->grp_nodes;
+			qos->grp_submit_jobs = start_qos->grp_submit_jobs;
+			qos->grp_wall = start_qos->grp_wall;
+
+			qos->max_cpu_mins_pu = start_qos->max_cpu_mins_pu;
+			qos->max_cpus_pu = start_qos->max_cpus_pu;
+			qos->max_jobs_pu = start_qos->max_jobs_pu;
+			qos->max_nodes_pu = start_qos->max_nodes_pu;
+			qos->max_submit_jobs_pu = start_qos->max_submit_jobs_pu;
+			qos->max_wall_pu = start_qos->max_wall_pu;
+
+			if(start_qos->job_flags)
+				qos->job_flags = xstrdup(start_qos->job_flags);
+
+			qos->priority = start_qos->priority;
+
 			xstrfmtcat(qos_str, "  %s\n", name);
 			list_append(qos_list, qos);
 		}
@@ -215,9 +374,12 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 		printf(" Adding QOS(s)\n%s", qos_str);
 		printf(" Settings\n");
 		if(description)
-			printf("  Description     = %s\n", description);
+			printf("  Description    = %s\n", description);
 		else
-			printf("  Description     = %s\n", "QOS Name");
+			printf("  Description    = %s\n", "QOS Name");
+
+		sacctmgr_print_qos_limits(start_qos);
+
 		xfree(qos_str);
 	}
 	
@@ -259,6 +421,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 	acct_qos_rec_t *qos = NULL;
 	char *object;
 	List qos_list = NULL;
+	int field_count = 0;
 
 	print_field_t *field = NULL;
 
@@ -268,7 +431,21 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 	enum {
 		PRINT_DESC,
 		PRINT_ID,
-		PRINT_NAME
+		PRINT_NAME,
+		PRINT_JOBF,
+		PRINT_PRIO,
+		PRINT_GRPCM,
+		PRINT_GRPC,
+		PRINT_GRPJ,
+		PRINT_GRPN,
+		PRINT_GRPS,
+		PRINT_GRPW,
+		PRINT_MAXC,
+		PRINT_MAXCM,
+		PRINT_MAXJ,
+		PRINT_MAXN,
+		PRINT_MAXS,
+		PRINT_MAXW,
 	};
 
 	_set_cond(&i, argc, argv, qos_cond, format_list);
@@ -278,35 +455,107 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 		list_destroy(format_list);		
 		return SLURM_ERROR;
 	} else if(!list_count(format_list)) {
-		slurm_addto_char_list(format_list, "N");
+		slurm_addto_char_list(format_list, "N,Prio,JobF,"
+				      "GrpJ,GrpN,GrpS,MaxJ,MaxN,MaxS,MaxW");
 	}
 
 	print_fields_list = list_create(destroy_print_field);
 
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
 		if(!strncasecmp("Description", object, 1)) {
 			field->type = PRINT_DESC;
 			field->name = xstrdup("Descr");
 			field->len = 20;
 			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("GrpCPUMins", object, 8)) {
+			field->type = PRINT_GRPCM;
+			field->name = xstrdup("GrpCPUMins");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("GrpCPUs", object, 8)) {
+			field->type = PRINT_GRPC;
+			field->name = xstrdup("GrpCPUs");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpJobs", object, 4)) {
+			field->type = PRINT_GRPJ;
+			field->name = xstrdup("GrpJobs");
+			field->len = 7;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpNodes", object, 4)) {
+			field->type = PRINT_GRPN;
+			field->name = xstrdup("GrpNodes");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpSubmitJobs", object, 4)) {
+			field->type = PRINT_GRPS;
+			field->name = xstrdup("GrpSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("ID", object, 1)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
 			field->len = 6;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("JobFlags", object, 1)) {
+			field->type = PRINT_JOBF;
+			field->name = xstrdup("JobFlags");
+			field->len = 20;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("MaxCPUMins", object, 7)) {
+			field->type = PRINT_MAXCM;
+			field->name = xstrdup("MaxCPUMins");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("MaxCPUs", object, 7)) {
+			field->type = PRINT_MAXC;
+			field->name = xstrdup("MaxCPUs");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxJobs", object, 4)) {
+			field->type = PRINT_MAXJ;
+			field->name = xstrdup("MaxJobs");
+			field->len = 7;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxNodes", object, 4)) {
+			field->type = PRINT_MAXN;
+			field->name = xstrdup("MaxNodes");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxSubmitJobs", object, 4)) {
+			field->type = PRINT_MAXS;
+			field->name = xstrdup("MaxSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxWall", object, 4)) {
+			field->type = PRINT_MAXW;
+			field->name = xstrdup("MaxWall");
+			field->len = 11;
+			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("Name", object, 1)) {
 			field->type = PRINT_NAME;
 			field->name = xstrdup("NAME");
 			field->len = 10;
 			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Priority", object, 1)) {
+			field->type = PRINT_PRIO;
+			field->name = xstrdup("Priority");
+			field->len = 10;
+			field->print_routine = print_fields_int;
 		} else {
 			exit_code=1;
 			fprintf(stderr, "Unknown field '%s'\n", object);
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
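
The strstr() check above lets a format token carry an optional '%<width>' suffix that overrides the default column width (atoi() of the text after '%'). For example, a listing request such as the following would print Name in 15 columns and Priority in 6 (field names are from the parser above; the widths are made up):

    sacctmgr list qos format=Name%15,Priority%6,MaxWall%12
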
@@ -329,24 +578,108 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
+	field_count = list_count(print_fields_list);
+
 	while((qos = list_next(itr))) {
+		int curr_inx = 1;
 		while((field = list_next(itr2))) {
 			switch(field->type) {
 			case PRINT_DESC:
 				field->print_routine(
-					field, qos->description);
+					field, qos->description,
+					(curr_inx == field_count));
+				break;
+			case PRINT_GRPCM:
+				field->print_routine(
+					field,
+					qos->grp_cpu_mins,
+					(curr_inx == field_count));
+				break;
+			case PRINT_GRPC:
+				field->print_routine(field,
+						     qos->grp_cpus,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPJ:
+				field->print_routine(field, 
+						     qos->grp_jobs,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPN:
+				field->print_routine(field,
+						     qos->grp_nodes,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPS:
+				field->print_routine(field, 
+						     qos->grp_submit_jobs,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_GRPW:
+				field->print_routine(
+					field,
+					qos->grp_wall,
+					(curr_inx == field_count));
 				break;
 			case PRINT_ID:
 				field->print_routine(
-					field, qos->id);
+					field, qos->id,
+					(curr_inx == field_count));
+				break;
+			case PRINT_JOBF:
+				field->print_routine(
+					field, qos->job_flags,
+					(curr_inx == field_count));
+				break;
+			case PRINT_MAXCM:
+				field->print_routine(
+					field,
+					qos->max_cpu_mins_pu,
+					(curr_inx == field_count));
+				break;
+			case PRINT_MAXC:
+				field->print_routine(field,
+						     qos->max_cpus_pu,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_MAXJ:
+				field->print_routine(field, 
+						     qos->max_jobs_pu,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_MAXN:
+				field->print_routine(field,
+						     qos->max_nodes_pu,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_MAXS:
+				field->print_routine(field, 
+						     qos->max_submit_jobs_pu,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_MAXW:
+				field->print_routine(
+					field,
+					qos->max_wall_pu,
+					(curr_inx == field_count));
 				break;
 			case PRINT_NAME:
 				field->print_routine(
-					field, qos->name);
+					field, qos->name,
+					(curr_inx == field_count));
+				break;
+			case PRINT_PRIO:
+				field->print_routine(
+					field, qos->priority,
+					(curr_inx == field_count));
 				break;
 			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
 				break;
 			}
+			curr_inx++;
 		}
 		list_iterator_reset(itr2);
 		printf("\n");
@@ -359,6 +692,90 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 	return rc;
 }
 
+extern int sacctmgr_modify_qos(int argc, char *argv[])
+{
+	int rc = SLURM_SUCCESS;
+	acct_qos_cond_t *qos_cond = xmalloc(sizeof(acct_qos_cond_t));
+	acct_qos_rec_t *qos = xmalloc(sizeof(acct_qos_rec_t));
+	int i=0;
+	int cond_set = 0, rec_set = 0, set = 0;
+	List ret_list = NULL;
+
+	init_acct_qos_rec(qos);
+
+	for (i=0; i<argc; i++) {
+		if (!strncasecmp (argv[i], "Where", 5)) {
+			i++;
+			cond_set = _set_cond(&i, argc, argv, qos_cond, NULL);
+			      
+		} else if (!strncasecmp (argv[i], "Set", 3)) {
+			i++;
+			rec_set = _set_rec(&i, argc, argv, NULL, qos);
+		} else {
+			cond_set = _set_cond(&i, argc, argv, qos_cond, NULL);
+		}
+	}
+
+	if(exit_code) {
+		destroy_acct_qos_cond(qos_cond);
+		destroy_acct_qos_rec(qos);
+		return SLURM_ERROR;
+	} else if(!rec_set) {
+		exit_code=1;
+		fprintf(stderr, " You didn't give me anything to set\n");
+		destroy_acct_qos_cond(qos_cond);
+		destroy_acct_qos_rec(qos);
+		return SLURM_ERROR;
+	} else if(!cond_set) {
+		if(!commit_check("You didn't set any conditions with 'WHERE'.\n"
+				 "Are you sure you want to continue?")) {
+			printf("Aborted\n");
+			destroy_acct_qos_cond(qos_cond);
+			destroy_acct_qos_rec(qos);
+			return SLURM_SUCCESS;
+		}		
+	}
+
+	notice_thread_init();		
+	
+	ret_list = acct_storage_g_modify_qos(db_conn, my_uid, qos_cond, qos);
+	if(ret_list && list_count(ret_list)) {
+		char *object = NULL;
+		ListIterator itr = list_iterator_create(ret_list);
+		printf(" Modified qos...\n");
+		while((object = list_next(itr))) {
+			printf("  %s\n", object);
+		}
+		list_iterator_destroy(itr);
+		set = 1;
+	} else if(ret_list) {
+		printf(" Nothing modified\n");
+	} else {
+		exit_code=1;
+		fprintf(stderr, " Error with request\n");
+		rc = SLURM_ERROR;
+	}
+	
+	if(ret_list)
+		list_destroy(ret_list);
+
+	notice_thread_fini();
+
+	if(set) {
+		if(commit_check("Would you like to commit changes?")) 
+			acct_storage_g_commit(db_conn, 1);
+		else {
+			printf(" Changes Discarded\n");
+			acct_storage_g_commit(db_conn, 0);
+		}
+	}
+
+	destroy_acct_qos_cond(qos_cond);
+	destroy_acct_qos_rec(qos);	
+
+	return rc;
+}
+
 extern int sacctmgr_delete_qos(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
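
Putting the new sacctmgr_modify_qos() together: the 'where' clause goes through _set_cond(), the 'set' clause through the new _set_rec(), and the change is only committed after commit_check(). An illustrative invocation (the QOS name and limit values are made up):

    sacctmgr modify qos where name=standby set Priority=5 GrpJobs=100 MaxWall=02:00:00
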
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index 98cdb9335..87cb5ab1a 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -72,7 +72,7 @@ main (int argc, char *argv[])
 	char **input_fields;
 	log_options_t opts = LOG_OPTS_STDERR_ONLY ;
 	int local_exit_code = 0;
-
+	char *temp = NULL;
 	int option_index;
 	static struct option long_options[] = {
 		{"help",     0, 0, 'h'},
@@ -171,10 +171,23 @@ main (int argc, char *argv[])
 		log_alter(opts, 0, NULL);
 	}
 
+	/* Check to see if we are running a supported accounting plugin */
+	temp = slurm_get_accounting_storage_type();
+	if(strcasecmp(temp, "accounting_storage/slurmdbd")
+	   && strcasecmp(temp, "accounting_storage/mysql")) {
+		fprintf (stderr, "You are not running a supported "
+			 "accounting_storage plugin\n(%s).\n"
+			 "Only 'accounting_storage/slurmdbd' "
+			 "and 'accounting_storage/mysql' are supported.\n",
+			temp);
+		xfree(temp);
+		exit(1);
+	}
+	xfree(temp);
 	/* always do a rollback.  If you don't then if there is an
 	 * error you can not rollback ;)
 	 */
-	db_conn = acct_storage_g_get_connection(false, 1);
+	db_conn = acct_storage_g_get_connection(false, 0, 1);
 	my_uid = getuid();
 
 	if (input_field_count)
@@ -442,7 +455,7 @@ _process_command (int argc, char *argv[])
 		}
 
 		if(argc > 1)
-			my_time = parse_time(argv[1]);
+			my_time = parse_time(argv[1], 1);
 		if(acct_storage_g_roll_usage(db_conn, my_time)
 		   == SLURM_SUCCESS) {
 			if(commit_check("Would you like to commit rollup?")) {
@@ -658,10 +671,23 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
      associations             when using show/list will list the           \n\
                               associations associated with the entity.     \n\
      delete <ENTITY> <SPECS>  delete the specified entity(s)               \n\
+     dump <CLUSTER> [<FILE>]  dump database information of the             \n\
+                              specified cluster to a flat file.            \n\
+                              Will default to clustername.cfg if no file   \n\
+                              is given.                                    \n\
      exit                     terminate sacctmgr                           \n\
      help                     print this description of use.               \n\
      list <ENTITY> [<SPECS>]  display info of identified entity, default   \n\
                               is display all.                              \n\
+     load <FILE> [<SPECS>]    read in the file to update the database      \n\
+                              with the file contents. <SPECS> here consist \n\
+                              of 'cluster=' and 'clean'.  The 'cluster='   \n\
+                              will override the cluster name given in the  \n\
+                              file.  The 'clean' option will remove what is\n\
+                              already in the system for this cluster and   \n\
+                              replace it with the file.  If the clean option\n\
+                              is not given, only new additions or          \n\
+                              modifications will be done, no deletions.    \n\
      modify <ENTITY> <SPECS>  modify entity                                \n\
      oneliner                 report output one record per line.           \n\
      parsable                 output will be | delimited with an ending '|'\n\
@@ -680,13 +706,17 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
   <SPECS> are different for each command entity pair.                      \n\
        list account       - Clusters=, Descriptions=, Format=, Names=,     \n\
                             Organizations=, Parents=, WithCoor=,           \n\
-                            and WithAssocs                                 \n\
+                            WithSubAccounts, and WithAssocs                \n\
        add account        - Clusters=, Description=, Fairshare=,           \n\
-                            MaxCPUSecs=, MaxJobs=, MaxNodes=, MaxWall=,    \n\
-                            Names=, Organization=, Parent=, and QosLevel   \n\
+                            GrpCPUMins=, GrpCPUs=, GrpJobs=, GrpNodes=,    \n\
+                            GrpSubmitJob=, GrpWall=, MaxCPUMins=, MaxJobs=,\n\
+                            MaxNodes=, MaxWall=, Names=, Organization=,    \n\
+                            Parent=, and QosLevel                          \n\
        modify account     - (set options) Description=, Fairshare=,        \n\
-                            MaxCPUSecs=, MaxJobs=, MaxNodes=, MaxWall=,    \n\
-                            Organization=, Parent=, and QosLevel=          \n\
+                            GrpCPUMins=, GrpCPUs=, GrpJobs=, GrpNodes=,    \n\
+                            GrpSubmitJob=, GrpWall=, MaxCPUMins=, MaxJobs=,\n\
+                            MaxNodes=, MaxWall=, Names=, Organization=,    \n\
+                            Parent=, and QosLevel=                         \n\
                             (where options) Clusters=, Descriptions=,      \n\
                             Names=, Organizations=, Parent=, and QosLevel= \n\
        delete account     - Clusters=, Descriptions=, Names=,              \n\
@@ -694,21 +724,29 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                                                                            \n\
        list associations  - Accounts=, Clusters=, Format=, ID=,            \n\
                             Partitions=, Parent=, Tree, Users=,            \n\
-                            WithDeleted, WOPInfo, WOPLimits                \n\
+                            WithSubAccounts, WithDeleted, WOPInfo,         \n\
+                            and WOPLimits                                  \n\
                                                                            \n\
-       list cluster       - Names= Format=                                 \n\
-       add cluster        - Fairshare=, MaxCPUSecs=,                       \n\
-                            MaxJobs=, MaxNodes=, MaxWall=, and Names=      \n\
-       modify cluster     - (set options) Fairshare=, MaxCPUSecs=,         \n\
-                            MaxJobs=, MaxNodes=, and MaxWall=              \n\
+       list cluster       - Format=, Names=                                \n\
+       add cluster        - Fairshare=, GrpCPUMins=, GrpCPUs=, GrpJobs=,   \n\
+                            GrpNodes=, GrpSubmitJob=, GrpWall=, MaxCPUMins=,\n\
+                            MaxJobs=, MaxNodes=, MaxWall=, and Name=       \n\
+       modify cluster     - (set options) Fairshare=, GrpCPUMins=,         \n\
+                            GrpCPUs=, GrpJobs=, GrpNodes=, GrpSubmitJob=,  \n\
+                            GrpWall=, MaxCPUMins=, MaxJobs=, MaxNodes=,    \n\
+                            and MaxWall=                                   \n\
                             (where options) Names=                         \n\
        delete cluster     - Names=                                         \n\
                                                                            \n\
        add coordinator    - Accounts=, and Names=                          \n\
        delete coordinator - Accounts=, and Names=                          \n\
                                                                            \n\
-       list qos           - Descriptions=, Ids=, Names=, and WithDeleted   \n\
-       add qos            - Description=, and Names=                       \n\
+       list qos           - Descriptions=, Format=, Ids=, Names=,          \n\
+                            and WithDeleted                                \n\
+       add qos            - Description=, GrpCPUMins=, GrpCPUs=, GrpJobs=, \n\
+                            GrpNodes=, GrpSubmitJob=, GrpWall=, JobFlags=, \n\
+                            MaxCPUMins=, MaxJobs=, MaxNodes=, MaxWall=,    \n\
+                            Preemptee=, Preemptor=, Priority=, and Names=  \n\
        delete qos         - Descriptions=, Ids=, and Names=                \n\
                                                                            \n\
        list transactions  - Actor=, EndTime,                               \n\
@@ -717,11 +755,11 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        list user          - AdminLevel=, DefaultAccounts=, Format=, Names=,\n\
                             QosLevel=, WithCoor=, and WithAssocs           \n\
        add user           - Accounts=, AdminLevel=, Clusters=,             \n\
-                            DefaultAccount=, Fairshare=, MaxCPUSecs=,      \n\
-                            MaxJobs=, MaxNodes=, MaxWall=, Names=,         \n\
-                            Partitions=, and QosLevel=                     \n\
+                            DefaultAccount=, Fairshare=, MaxCPUMins=,      \n\
+                            MaxCPUs=, MaxJobs=, MaxNodes=, MaxWall=,       \n\
+                            Names=, Partitions=, and QosLevel=             \n\
        modify user        - (set options) AdminLevel=, DefaultAccount=,    \n\
-                            Fairshare=, MaxCPUSecs=, MaxJobs=,             \n\
+                            Fairshare=, MaxCPUMins=, MaxCPUs=, MaxJobs=,   \n\
                             MaxNodes=, MaxWall=, and QosLevel=             \n\
                             (where options) Accounts=, AdminLevel=,        \n\
                             Clusters=, DefaultAccounts=, Names=,           \n\
@@ -731,12 +769,14 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                                                                            \n\
   Format options are different for listing each entity pair.               \n\
                                                                            \n\
-       Account            - Account, Cluster, CoordinatorList,             \n\
-                            Description, Organization, QOS, QOSRAW         \n\
+       Account            - Account, CoordinatorList, Description,         \n\
+                            Organization                                   \n\
                                                                            \n\
-       Association        - Account, Cluster, Fairshare, ID, LFT,          \n\
-                            MaxCPUSecs, MaxJobs, MaxNodes, MaxWall,        \n\
-                            ParentID, ParentName, Partition, RGT, User     \n\
+       Association        - Account, Cluster, Fairshare, GrpCPUMins,       \n\
+                            GrpCPUs, GrpJobs, GrpNodes, GrpSubmitJob,      \n\
+                            GrpWall, ID, LFT, MaxCPUSecs, MaxJobs,         \n\
+                            MaxNodes, MaxWall, QOS, ParentID,              \n\
+                            ParentName, Partition, RGT, User               \n\
                                                                            \n\
        Cluster            - Cluster, ControlHost, ControlPort, Fairshare   \n\
                             MaxCPUSecs, MaxJobs, MaxNodes, MaxWall         \n\
@@ -745,8 +785,8 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                                                                            \n\
        Transactions       - Action, Actor, ID, Info, TimeStamp, Where      \n\
                                                                            \n\
-       User               - Account, AdminLevel, Cluster, CoordinatorList, \n\
-                            DefaultAccount, QOS, QOSRAW, User              \n\
+       User               - AdminLevel, CoordinatorList, DefaultAccount,   \n\
+                            User                                           \n\
                                                                            \n\
                                                                            \n\
   All commands, entities, and options are case-insensitive.             \n\n");
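
The usage text above lists the new per-association limits (MaxCPUMins=, MaxCPUs=, MaxJobs=, MaxNodes=, MaxWall=) accepted by "add user" and "modify user", and the Format=/WithDeleted options for "list qos". A minimal sketch of invocations using those options; the user, account, QOS name, and limit values below are purely illustrative:

    sacctmgr add user names=alice accounts=research defaultaccount=research \
             fairshare=10 maxjobs=10 maxnodes=4 maxwall=24:00:00 maxcpumins=100000
    sacctmgr list qos names=normal withdeleted
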
diff --git a/src/sacctmgr/sacctmgr.h b/src/sacctmgr/sacctmgr.h
index 4fc865fd4..38b94879a 100644
--- a/src/sacctmgr/sacctmgr.h
+++ b/src/sacctmgr/sacctmgr.h
@@ -145,16 +145,20 @@ extern int notice_thread_init();
 extern int notice_thread_fini();
 extern int commit_check(char *warning);
 extern int get_uint(char *in_value, uint32_t *out_value, char *type);
+extern int get_uint64(char *in_value, uint64_t *out_value, char *type);
 extern int addto_qos_char_list(List char_list, List qos_list, char *names, 
 			       int option);
+extern int addto_action_char_list(List char_list, char *names);
+extern List copy_char_list(List qos_list);
 extern void sacctmgr_print_coord_list(
 	print_field_t *field, List value, int last);
 extern void sacctmgr_print_qos_list(print_field_t *field, List qos_list,
 				    List value, int last);
-extern char *get_qos_complete_str(List qos_list, List num_qos_list);
+
+extern void sacctmgr_print_assoc_limits(acct_association_rec_t *assoc);
+extern void sacctmgr_print_qos_limits(acct_qos_rec_t *qos);
 extern int sort_coord_list(acct_coord_rec_t *coord_a,
 			   acct_coord_rec_t *coord_b);
-extern int sort_char_list(char *name_a, char *name_b);
 
 /* you need to free the objects returned from these functions */
 extern acct_association_rec_t *sacctmgr_find_association(char *user,
@@ -188,6 +192,9 @@ extern acct_cluster_rec_t *sacctmgr_find_cluster_from_list(
 
 
 /* file_functions.c */
+extern int print_file_add_limits_to_line(char **line,
+					 acct_association_rec_t *assoc);
+
 extern int print_file_sacctmgr_assoc_list(FILE *fd, 
 					  List sacctmgr_assoc_list,
 					  List user_list,
diff --git a/src/sacctmgr/txn_functions.c b/src/sacctmgr/txn_functions.c
index e1ddb2902..47d69a11f 100644
--- a/src/sacctmgr/txn_functions.c
+++ b/src/sacctmgr/txn_functions.c
@@ -51,6 +51,9 @@ static int _set_cond(int *start, int argc, char *argv[],
 		end = parse_option_end(argv[i]);
 		if(!end && !strncasecmp(argv[i], "where", 5)) {
 			continue;
+		} else if(!end && !strncasecmp(argv[i], "withassocinfo", 5)) {
+			txn_cond->with_assoc_info = 1;
+			set = 1;
 		} else if(!end
 			  || (!strncasecmp (argv[i], "Id", 1))
 			  || (!strncasecmp (argv[i], "Txn", 1))) {
@@ -61,32 +64,53 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if(slurm_addto_char_list(txn_cond->id_list, 
 						 argv[i]+end))
 				set = 1;
+		} else if (!strncasecmp (argv[i], "Accounts", 3)) {
+			if(!txn_cond->acct_list)
+				txn_cond->acct_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(txn_cond->acct_list,
+						 argv[i]+end))
+				set = 1;
 		} else if (!strncasecmp (argv[i], "Action", 4)) {
-			/* FIX ME! fill this in */
-/* 			if(!txn_cond->action_list) */
-/* 				txn_cond->action_list =  */
-/* 					list_create(slurm_destroy_char); */
-
-/* 			if(slurm_addto_char_list(txn_cond->action_list, */
-/* 					argv[i]+end)) */
-/* 			set = 1; */
-		} else if (!strncasecmp (argv[i], "Actors", 4)
-			   || !strncasecmp (argv[i], "User", 1)) {
+			if(!txn_cond->action_list)
+				txn_cond->action_list =
+					list_create(slurm_destroy_char);
+
+			if(addto_action_char_list(txn_cond->action_list,
+						  argv[i]+end))
+				set = 1;
+			else
+				exit_code=1;
+		} else if (!strncasecmp (argv[i], "Actors", 4)) {
 			if(!txn_cond->actor_list)
 				txn_cond->actor_list =
 					list_create(slurm_destroy_char);
 			if(slurm_addto_char_list(txn_cond->actor_list,
 						 argv[i]+end))
 				set = 1;
+		} else if (!strncasecmp (argv[i], "Clusters", 3)) {
+			if(!txn_cond->cluster_list)
+				txn_cond->cluster_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(txn_cond->cluster_list,
+						 argv[i]+end))
+				set = 1;
 		} else if (!strncasecmp (argv[i], "End", 1)) {
-			txn_cond->time_end = parse_time(argv[i]+end);
+			txn_cond->time_end = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format", 1)) {
 			if(format_list)
 				slurm_addto_char_list(format_list, argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Start", 1)) {
-			txn_cond->time_start = parse_time(argv[i]+end);
+			txn_cond->time_start = parse_time(argv[i]+end, 1);
 			set = 1;
+		} else if (!strncasecmp (argv[i], "User", 1)) {
+			if(!txn_cond->user_list)
+				txn_cond->user_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(txn_cond->user_list,
+						 argv[i]+end))
+				set = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n", argv[i]);
@@ -108,6 +132,7 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	char *object = NULL;
+	int field_count = 0;
 
 	print_field_t *field = NULL;
 
@@ -115,11 +140,14 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 	List print_fields_list; /* types are of print_field_t */
 
 	enum {
+		PRINT_ACCT,
 		PRINT_ACTION,
 		PRINT_ACTOR,
+		PRINT_CLUSTER,
 		PRINT_ID,
 		PRINT_INFO,
 		PRINT_TS,
+		PRINT_USER,
 		PRINT_WHERE
 	};
 
@@ -133,13 +161,23 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 
 	print_fields_list = list_create(destroy_print_field);
 
-	if(!list_count(format_list)) 
+	if(!list_count(format_list)) {
 		slurm_addto_char_list(format_list, "T,Action,Actor,Where,Info");
-	
+		if(txn_cond->with_assoc_info) 
+			slurm_addto_char_list(format_list, 
+					      "User,Account,Cluster");
+	}
+
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
-		if(!strncasecmp("Action", object, 4)) {
+		if(!strncasecmp("Accounts", object, 3)) {
+			field->type = PRINT_ACCT;
+			field->name = xstrdup("Accounts");
+			field->len = 20;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Action", object, 4)) {
 			field->type = PRINT_ACTION;
 			field->name = xstrdup("Action");
 			field->len = 20;
@@ -149,6 +187,11 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 			field->name = xstrdup("Actor");
 			field->len = 10;
 			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Clusters", object, 4)) {
+			field->type = PRINT_CLUSTER;
+			field->name = xstrdup("Clusters");
+			field->len = 20;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("ID", object, 2)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
@@ -164,6 +207,11 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 			field->name = xstrdup("Time");
 			field->len = 15;
 			field->print_routine = print_fields_date;
+		} else if(!strncasecmp("Users", object, 4)) {
+			field->type = PRINT_USER;
+			field->name = xstrdup("Users");
+			field->len = 20;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Where", object, 1)) {
 			field->type = PRINT_WHERE;
 			field->name = xstrdup("Where");
@@ -175,6 +223,11 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
@@ -198,38 +251,62 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
+	field_count = list_count(print_fields_list);
+
 	while((txn = list_next(itr))) {
+		int curr_inx = 1;
 		while((field = list_next(itr2))) {
 			switch(field->type) {
+			case PRINT_ACCT:
+				field->print_routine(field, txn->accts,
+						     (curr_inx == field_count));
+				break;
 			case PRINT_ACTION:
 				field->print_routine(
 					field, 
 					slurmdbd_msg_type_2_str(txn->action,
-								0));
+								0),
+					(curr_inx == field_count));
 				break;
 			case PRINT_ACTOR:
 				field->print_routine(field,
-						     txn->actor_name);
+						     txn->actor_name,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_CLUSTER:
+				field->print_routine(field, txn->clusters,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_ID:
 				field->print_routine(field,
-						     txn->id);
+						     txn->id,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_INFO:
 				field->print_routine(field, 
-						     txn->set_info);
+						     txn->set_info,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_TS:
 				field->print_routine(field,
-						     txn->timestamp);
+						     txn->timestamp,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_USER:
+				field->print_routine(field, txn->users,
+						     (curr_inx == field_count));
 				break;
 			case PRINT_WHERE:
 				field->print_routine(field, 
-						     txn->where_query);
+						     txn->where_query,
+						     (curr_inx == field_count));
 				break;
 			default:
-				break;
+				field->print_routine(field, NULL,
+						     (curr_inx == field_count));
+					break;
 			}
+			curr_inx++;
 		}
 		list_iterator_reset(itr2);
 		printf("\n");
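
The txn_functions.c changes above extend "sacctmgr list transactions" with Accounts=, Clusters=, User=, and Action= filters, a WithAssocInfo flag (which also adds User, Account, and Cluster columns to the default format), and a %<len> width suffix on format field names. A rough usage sketch; the account name, width, and time are invented for illustration:

    sacctmgr list transactions accounts=research withassocinfo
    sacctmgr list transactions format=Time,Action,Actor,Where%40,Info start=06/01/08
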
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 66d428961..5a7b96113 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -48,27 +48,26 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int a_set = 0;
 	int end = 0;
 	List qos_list = NULL;
+	acct_association_cond_t *assoc_cond = NULL;
 
 	if(!user_cond) {
 		error("No user_cond given");
 		return -1;
 	}
 
-	if(!user_cond->assoc_cond) {
+	if(!user_cond->assoc_cond) 
 		user_cond->assoc_cond = 
 			xmalloc(sizeof(acct_association_cond_t));
-		user_cond->assoc_cond->fairshare = NO_VAL;
-		user_cond->assoc_cond->max_cpu_secs_per_job = NO_VAL;
-		user_cond->assoc_cond->max_jobs = NO_VAL;
-		user_cond->assoc_cond->max_nodes_per_job = NO_VAL;
-		user_cond->assoc_cond->max_wall_duration_per_job = NO_VAL;
-		/* we need this to make sure we only change users, not
-		 * accounts if this list didn't exist it would change
-		 * accounts.
-		 */
-		user_cond->assoc_cond->user_list = 
-			list_create(slurm_destroy_char);
-	}
+		
+	assoc_cond = user_cond->assoc_cond;
+	
+	/* We need this to make sure we only change users, not accounts;
+	 * if this list didn't exist it would change accounts.  Having it
+	 * blank is fine, it just needs to exist.
+	 */
+	if(!assoc_cond->user_list)
+		assoc_cond->user_list = list_create(slurm_destroy_char);
 
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
@@ -77,38 +76,43 @@ static int _set_cond(int *start, int argc, char *argv[],
 			break;
 		} else if (!end && !strncasecmp (argv[i], "WithAssoc", 5)) {
 			user_cond->with_assocs = 1;
-		} else if (!strncasecmp (argv[i], "WithCoordinators", 5)) {
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithCoordinators", 5)) {
 			user_cond->with_coords = 1;
+		} else if (!end && 
+			   !strncasecmp (argv[i], "WithRawQOS", 5)) {
+			assoc_cond->with_raw_qos = 1;
+		} else if (!end && !strncasecmp (argv[i], "WOPInfo", 4)) {
+			assoc_cond->without_parent_info = 1;
+		} else if (!end && !strncasecmp (argv[i], "WOPLimits", 4)) {
+			assoc_cond->without_parent_limits = 1;
 		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
 			continue;
 		} else if(!end
 			  || !strncasecmp (argv[i], "Names", 1)
 			  || !strncasecmp (argv[i], "Users", 1)) {
-			if(slurm_addto_char_list(
-				   user_cond->assoc_cond->user_list,
-				   argv[i]+end)) 
+			if(slurm_addto_char_list(assoc_cond->user_list,
+						 argv[i]+end)) 
 				u_set = 1;
 		} else if (!strncasecmp (argv[i], "Account", 2)) {
-			if(!user_cond->assoc_cond->acct_list) {
-				user_cond->assoc_cond->acct_list = 
+			if(!assoc_cond->acct_list) {
+				assoc_cond->acct_list = 
 					list_create(slurm_destroy_char);
 			}
-			if(slurm_addto_char_list(
-				   user_cond->assoc_cond->acct_list,
-				   argv[i]+end))
+			if(slurm_addto_char_list(assoc_cond->acct_list,
+						 argv[i]+end))
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "AdminLevel", 2)) {
 			user_cond->admin_level = 
 				str_2_acct_admin_level(argv[i]+end);
 			u_set = 1;			
 		} else if (!strncasecmp (argv[i], "Clusters", 1)) {
-			if(!user_cond->assoc_cond->cluster_list) {
-				user_cond->assoc_cond->cluster_list = 
+			if(!assoc_cond->cluster_list) {
+				assoc_cond->cluster_list = 
 					list_create(slurm_destroy_char);
 			}
-			if(slurm_addto_char_list(
-				   user_cond->assoc_cond->cluster_list,
-				   argv[i]+end))
+			if(slurm_addto_char_list(assoc_cond->cluster_list,
+						 argv[i]+end))
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "DefaultAccount", 1)) {
 			if(!user_cond->def_acct_list) {
@@ -121,19 +125,118 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else if (!strncasecmp (argv[i], "Format", 1)) {
 			if(format_list)
 				slurm_addto_char_list(format_list, argv[i]+end);
+		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
+			if(!assoc_cond->fairshare_list)
+				assoc_cond->fairshare_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->fairshare_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCPUMins", 7)) {
+			if(!assoc_cond->grp_cpu_mins_list)
+				assoc_cond->grp_cpu_mins_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_cpu_mins_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCpus", 7)) {
+			if(!assoc_cond->grp_cpus_list)
+				assoc_cond->grp_cpus_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_cpus_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpJobs", 4)) {
+			if(!assoc_cond->grp_jobs_list)
+				assoc_cond->grp_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_jobs_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpNodes", 4)) {
+			if(!assoc_cond->grp_nodes_list)
+				assoc_cond->grp_nodes_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(assoc_cond->grp_nodes_list,
+					argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpSubmitJobs", 4)) {
+			if(!assoc_cond->grp_submit_jobs_list)
+				assoc_cond->grp_submit_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->grp_submit_jobs_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpWall", 4)) {
+			if(!assoc_cond->grp_wall_list)
+				assoc_cond->grp_wall_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->grp_wall_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCPUMins", 7)) {
+			if(!assoc_cond->max_cpu_mins_pj_list)
+				assoc_cond->max_cpu_mins_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_cpu_mins_pj_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCpus", 7)) {
+			if(!assoc_cond->max_cpus_pj_list)
+				assoc_cond->max_cpus_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_cpus_pj_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
+			if(!assoc_cond->max_jobs_list)
+				assoc_cond->max_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_jobs_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
+			if(!assoc_cond->max_nodes_pj_list)
+				assoc_cond->max_nodes_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_nodes_pj_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobs", 4)) {
+			if(!assoc_cond->max_submit_jobs_list)
+				assoc_cond->max_submit_jobs_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_submit_jobs_list,
+				   argv[i]+end))
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
+			if(!assoc_cond->max_wall_pj_list)
+				assoc_cond->max_wall_pj_list =
+					list_create(slurm_destroy_char);
+			if(slurm_addto_char_list(
+				   assoc_cond->max_wall_pj_list,
+				   argv[i]+end))
+				a_set = 1;
 		} else if (!strncasecmp (argv[i], "Partition", 3)) {
-			if(!user_cond->assoc_cond->partition_list) {
-				user_cond->assoc_cond->partition_list = 
+			if(!assoc_cond->partition_list) {
+				assoc_cond->partition_list = 
 					list_create(slurm_destroy_char);
 			}
 			if(slurm_addto_char_list(
-				   user_cond->assoc_cond->partition_list, 
+				   assoc_cond->partition_list, 
 				   argv[i]+end))
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "QosLevel", 1)) {
 			int option = 0;
-			if(!user_cond->qos_list) {
-				user_cond->qos_list = 
+			if(!assoc_cond->qos_list) {
+				assoc_cond->qos_list = 
 					list_create(slurm_destroy_char);
 			}
 			
@@ -142,8 +245,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 					db_conn, my_uid, NULL);
 			}
 
-			addto_qos_char_list(user_cond->qos_list, qos_list,
-					    argv[i]+end, option);
+			addto_qos_char_list(assoc_cond->qos_list,
+					    qos_list, argv[i]+end, option);
 			u_set = 1;
 		} else {
 			exit_code=1;
@@ -170,7 +273,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 static int _set_rec(int *start, int argc, char *argv[],
 		    acct_user_rec_t *user,
-		    acct_association_rec_t *association)
+		    acct_association_rec_t *assoc)
 {
 	int i, mins;
 	int u_set = 0;
@@ -195,41 +298,98 @@ static int _set_rec(int *start, int argc, char *argv[],
 				str_2_acct_admin_level(argv[i]+end);
 			u_set = 1;
 		} else if (!strncasecmp (argv[i], "DefaultAccount", 1)) {
+			if(user->default_acct)
+				xfree(user->default_acct);
 			user->default_acct = strip_quotes(argv[i]+end, NULL);
 			u_set = 1;
 		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
-			if(!association)
+			if(!assoc)
 				continue;
-			if (get_uint(argv[i]+end, &association->fairshare, 
+			if (get_uint(argv[i]+end, &assoc->fairshare, 
 				     "FairShare") == SLURM_SUCCESS)
 				a_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxCPUSec", 4)) {
-			if(!association)
+		} else if (!strncasecmp (argv[i], "GrpCPUMins", 7)) {
+			if(!assoc)
 				continue;
-			if (get_uint(argv[i]+end, 
-				     &association->max_cpu_secs_per_job, 
-				     "MaxCPUSec") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, 
+				       &assoc->grp_cpu_mins, 
+				       "GrpCPUMins") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCpus", 7)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->grp_cpus,
+			    "GrpCpus") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpJobs", 4)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->grp_jobs,
+			    "GrpJobs") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpNodes", 4)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->grp_nodes,
+			    "GrpNodes") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpSubmitJobs", 4)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->grp_submit_jobs,
+			    "GrpSubmitJobs") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpWall", 4)) {
+			if(!assoc)
+				continue;
+			mins = time_str2mins(argv[i]+end);
+			if (mins != NO_VAL) {
+				assoc->grp_wall	= (uint32_t) mins;
+				a_set = 1;
+			} else {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpWall time format: %s\n", 
+					argv[i]);
+			}
+		} else if (!strncasecmp (argv[i], "MaxCPUMins", 7)) {
+			if(!assoc)
+				continue;
+			if (get_uint64(argv[i]+end, 
+				       &assoc->max_cpu_mins_pj, 
+				       "MaxCPUMins") == SLURM_SUCCESS)
+				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCpus", 7)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->max_cpus_pj,
+			    "MaxCpus") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
-			if(!association)
+			if(!assoc)
 				continue;
-			if (get_uint(argv[i]+end, &association->max_jobs, 
+			if (get_uint(argv[i]+end, &assoc->max_jobs,
 			    "MaxJobs") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
-			if(!association)
+			if(!assoc)
 				continue;
-			if (get_uint(argv[i]+end,
-			    &association->max_nodes_per_job, 
+			if (get_uint(argv[i]+end, 
+			    &assoc->max_nodes_pj,
 			    "MaxNodes") == SLURM_SUCCESS)
 				a_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobs", 4)) {
+			if(!assoc)
+				continue;
+			if (get_uint(argv[i]+end, &assoc->max_submit_jobs,
+			    "MaxSubmitJobs") == SLURM_SUCCESS)
+				a_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
-			if(!association)
+			if(!assoc)
 				continue;
 			mins = time_str2mins(argv[i]+end);
 			if (mins != NO_VAL) {
-				association->max_wall_duration_per_job 
-					= (uint32_t) mins;
+				assoc->max_wall_pj = (uint32_t) mins;
 				a_set = 1;
 			} else {
 				exit_code=1;
@@ -239,24 +399,26 @@ static int _set_rec(int *start, int argc, char *argv[],
 			}
 		} else if (!strncasecmp (argv[i], "QosLevel", 1)) {
 			int option = 0;
-			if(!user->qos_list) {
-				user->qos_list = 
+			if(!assoc)
+				continue;
+			if(!assoc->qos_list) 
+				assoc->qos_list = 
 					list_create(slurm_destroy_char);
-			}
-			
-			if(!qos_list) {
+						
+			if(!qos_list) 
 				qos_list = acct_storage_g_get_qos(
 					db_conn, my_uid, NULL);
-			}
-
+						
 			if(end > 2 && argv[i][end-1] == '='
 			   && (argv[i][end-2] == '+' 
 			       || argv[i][end-2] == '-'))
 				option = (int)argv[i][end-2];
 
-			addto_qos_char_list(user->qos_list, qos_list,
-					    argv[i]+end, option);
-			u_set = 1;
+			if(addto_qos_char_list(assoc->qos_list,
+					       qos_list, argv[i]+end, option))
+				a_set = 1;
+			else
+				exit_code = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown option: %s\n"
@@ -264,11 +426,12 @@ static int _set_rec(int *start, int argc, char *argv[],
 				argv[i]);
 		}		
 	}	
-	if(qos_list)
-		list_destroy(qos_list);
 
 	(*start) = i;
 
+	if(qos_list)
+		list_destroy(qos_list);
+
 	if(u_set && a_set)
 		return 3;
 	else if(u_set)
@@ -278,6 +441,120 @@ static int _set_rec(int *start, int argc, char *argv[],
 	return 0;
 }
 
+/*
+ * IN: user_cond - used for the assoc_cond pointing to the user and
+ *     acct list 
+ * IN: check - whether or not to check for the existence of the above lists
+ */
+static int _check_coord_request(acct_user_cond_t *user_cond, bool check)
+{
+	ListIterator itr = NULL, itr2 = NULL;
+	char *name = NULL, *name2 = NULL;
+
+	acct_account_cond_t account_cond;
+	List local_acct_list = NULL;
+	List local_user_list = NULL;
+	int rc = SLURM_SUCCESS;
+
+	if(!user_cond) {
+		exit_code=1;
+		fprintf(stderr, " You need to specify the user_cond here.\n"); 
+		return SLURM_ERROR;
+	}
+
+	if(!check && (!user_cond->assoc_cond->user_list
+		      || !list_count(user_cond->assoc_cond->user_list))) {
+		exit_code=1;
+		fprintf(stderr, " You need to specify a user list here.\n"); 
+		return SLURM_ERROR;	
+	}
+
+	if(!check && (!user_cond->assoc_cond->acct_list
+		      || !list_count(user_cond->assoc_cond->acct_list))) {
+		exit_code=1;
+		fprintf(stderr, " You need to specify an account list here.\n");
+		return SLURM_ERROR;	
+	}
+
+	memset(&account_cond, 0, sizeof(acct_account_cond_t));
+	account_cond.assoc_cond = user_cond->assoc_cond;
+	local_acct_list =
+		acct_storage_g_get_accounts(db_conn, my_uid, &account_cond);
+	if(!local_acct_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem getting accounts from database.  "
+			"Contact your admin.\n");
+		return SLURM_ERROR;
+	}
+
+	if(user_cond->assoc_cond->acct_list && 
+	   (list_count(local_acct_list) != 
+	    list_count(user_cond->assoc_cond->acct_list))) {
+		
+		itr = list_iterator_create(user_cond->assoc_cond->acct_list);
+		itr2 = list_iterator_create(local_acct_list);
+		
+		while((name = list_next(itr))) {
+			while((name2 = list_next(itr2))) {
+				if(!strcmp(name, name2)) 
+					break;
+			}
+			list_iterator_reset(itr2);
+			if(!name2) {
+				fprintf(stderr, 
+					" You specified a non-existent "
+					"account '%s'.\n", name); 
+				exit_code=1;
+				rc = SLURM_ERROR;
+			}
+		}
+		list_iterator_destroy(itr);
+		list_iterator_destroy(itr2);
+	}
+
+	local_user_list = acct_storage_g_get_users(db_conn, my_uid, user_cond);
+	if(!local_user_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem getting users from database.  "
+			"Contact your admin.\n");
+		if(local_acct_list)
+			list_destroy(local_acct_list);
+		return SLURM_ERROR;
+	}
+
+	if(user_cond->assoc_cond->user_list &&
+	   (list_count(local_user_list) != 
+	    list_count(user_cond->assoc_cond->user_list))) {
+		
+		itr = list_iterator_create(user_cond->assoc_cond->user_list);
+		itr2 = list_iterator_create(local_user_list);
+		
+		while((name = list_next(itr))) {
+			while((name2 = list_next(itr2))) {
+				if(!strcmp(name, name2)) 
+					break;
+			}
+			list_iterator_reset(itr2);
+			if(!name2) {
+				fprintf(stderr, 
+					" You specified a non-existent "
+					"user '%s'.\n", name); 
+				exit_code=1;
+				rc = SLURM_ERROR;
+			}
+		}
+		list_iterator_destroy(itr);
+		list_iterator_destroy(itr2);
+	}
+
+	if(local_acct_list)
+		list_destroy(local_acct_list);
+	if(local_user_list)
+		list_destroy(local_user_list);
+
+	return rc;
+}
+
 extern int sacctmgr_add_user(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
@@ -288,10 +565,10 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 	ListIterator itr_p = NULL;
 	acct_user_rec_t *user = NULL;
 	acct_association_rec_t *assoc = NULL;
+	acct_association_rec_t start_assoc;
 	char *default_acct = NULL;
 	acct_association_cond_t *assoc_cond = NULL;
 	acct_association_cond_t query_assoc_cond;
-	List add_qos_list = NULL;
 	List qos_list = NULL;
 	acct_admin_level_t admin_level = ACCT_ADMIN_NOTSET;
 	char *name = NULL, *account = NULL, *cluster = NULL, *partition = NULL;
@@ -301,11 +578,6 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 	List local_assoc_list = NULL;
 	List local_acct_list = NULL;
 	List local_user_list = NULL;
-	uint32_t fairshare = NO_VAL; 
-	uint32_t max_jobs = NO_VAL; 
-	uint32_t max_nodes_per_job = NO_VAL;
-	uint32_t max_wall_duration_per_job = NO_VAL;
-	uint32_t max_cpu_secs_per_job = NO_VAL;
 	char *user_str = NULL;
 	char *assoc_str = NULL;
 	int limit_set = 0, mins;
@@ -317,8 +589,10 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 /* 		       " Please contact your administrator.\n"); */
 /* 		return SLURM_ERROR; */
 /* 	} */
+	init_acct_association_rec(&start_assoc);
 
 	assoc_cond = xmalloc(sizeof(acct_association_cond_t));
+
 	assoc_cond->user_list = list_create(slurm_destroy_char);
 	assoc_cond->acct_list = list_create(slurm_destroy_char);
 	assoc_cond->cluster_list = list_create(slurm_destroy_char);
@@ -326,7 +600,9 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 
 	for (i=0; i<argc; i++) {
 		int end = parse_option_end(argv[i]);
-		if(!end) {
+		if(!end
+		   || !strncasecmp (argv[i], "Names", 1)
+		   || !strncasecmp (argv[i], "Users", 1)) {
 			slurm_addto_char_list(assoc_cond->user_list,
 					      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Accounts", 2)) {
@@ -342,25 +618,67 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 			slurm_addto_char_list(assoc_cond->acct_list,
 					default_acct);
 		} else if (!strncasecmp (argv[i], "FairShare", 1)) {
-			if (get_uint(argv[i]+end, &fairshare, 
+			if (get_uint(argv[i]+end, &start_assoc.fairshare, 
 			    "FairShare") == SLURM_SUCCESS)
 				limit_set = 1;
-		} else if (!strncasecmp (argv[i], "MaxCPUSecs", 4)) {
-			if (get_uint(argv[i]+end, &max_cpu_secs_per_job, 
-			    "MaxCPUSecs") == SLURM_SUCCESS)
+		} else if (!strncasecmp (argv[i], "GrpCPUMins", 7)) {
+			if (get_uint64(argv[i]+end, 
+				       &start_assoc.grp_cpu_mins, 
+				       "GrpCPUMins") == SLURM_SUCCESS)
+				limit_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCpus", 7)) {
+			if (get_uint(argv[i]+end, &start_assoc.grp_cpus,
+			    "GrpCpus") == SLURM_SUCCESS)
+				limit_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpJobs", 4)) {
+			if (get_uint(argv[i]+end, &start_assoc.grp_jobs,
+			    "GrpJobs") == SLURM_SUCCESS)
+				limit_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpNodes", 4)) {
+			if (get_uint(argv[i]+end, &start_assoc.grp_nodes,
+			    "GrpNodes") == SLURM_SUCCESS)
+				limit_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpSubmitJobs", 4)) {
+			if (get_uint(argv[i]+end, &start_assoc.grp_submit_jobs,
+			    "GrpSubmitJobs") == SLURM_SUCCESS)
+				limit_set = 1;
+		} else if (!strncasecmp (argv[i], "GrpWall", 4)) {
+			mins = time_str2mins(argv[i]+end);
+			if (mins != NO_VAL) {
+				start_assoc.grp_wall = (uint32_t) mins;
+				limit_set = 1;
+			} else {
+				exit_code=1;
+				fprintf(stderr, 
+					" Bad GrpWall time format: %s\n", 
+					argv[i]);
+			}
+		} else if (!strncasecmp (argv[i], "MaxCPUMins", 7)) {
+			if (get_uint64(argv[i]+end, 
+				       &start_assoc.max_cpu_mins_pj, 
+				       "MaxCPUMins") == SLURM_SUCCESS)
+				limit_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCpus", 7)) {
+			if (get_uint(argv[i]+end, &start_assoc.max_cpus_pj,
+			    "MaxCpus") == SLURM_SUCCESS)
 				limit_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxJobs", 4)) {
-			if (get_uint(argv[i]+end, &max_jobs, 
+			if (get_uint(argv[i]+end, &start_assoc.max_jobs,
 			    "MaxJobs") == SLURM_SUCCESS)
 				limit_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxNodes", 4)) {
-			if (get_uint(argv[i]+end, &max_nodes_per_job, 
+			if (get_uint(argv[i]+end, 
+			    &start_assoc.max_nodes_pj,
 			    "MaxNodes") == SLURM_SUCCESS)
 				limit_set = 1;
+		} else if (!strncasecmp (argv[i], "MaxSubmitJobs", 4)) {
+			if (get_uint(argv[i]+end, &start_assoc.max_submit_jobs,
+			    "MaxSubmitJobs") == SLURM_SUCCESS)
+				limit_set = 1;
 		} else if (!strncasecmp (argv[i], "MaxWall", 4)) {
 			mins = time_str2mins(argv[i]+end);
 			if (mins != NO_VAL) {
-				max_wall_duration_per_job = (uint32_t) mins;
+				start_assoc.max_wall_pj = (uint32_t) mins;
 				limit_set = 1;
 			} else {
 				exit_code=1;
@@ -368,26 +686,29 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					" Bad MaxWall time format: %s\n", 
 					argv[i]);
 			}
-		} else if (!strncasecmp (argv[i], "Names", 1)) {
-			slurm_addto_char_list(assoc_cond->user_list,
-					      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Partitions", 1)) {
 			slurm_addto_char_list(assoc_cond->partition_list,
 					argv[i]+end);
 		} else if (!strncasecmp (argv[i], "QosLevel", 1)) {
 			int option = 0;
-			if(!add_qos_list) {
-				add_qos_list = 
+			if(!start_assoc.qos_list) 
+				start_assoc.qos_list = 
 					list_create(slurm_destroy_char);
-			}
 			
-			if(!qos_list) {
+			if(!qos_list) 
 				qos_list = acct_storage_g_get_qos(
 					db_conn, my_uid, NULL);
-			}
+			
+			if(end > 2 && argv[i][end-1] == '='
+			   && (argv[i][end-2] == '+' 
+			       || argv[i][end-2] == '-'))
+				option = (int)argv[i][end-2];
 
-			addto_qos_char_list(add_qos_list, qos_list,
-					    argv[i]+end, option);
+			if(addto_qos_char_list(start_assoc.qos_list, qos_list,
+					       argv[i]+end, option))
+				limit_set = 1;
+			else
+				exit_code = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown option: %s\n", argv[i]);
@@ -537,29 +858,17 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					xfree(warning);
 					rc = SLURM_ERROR;
 					list_flush(user_list);
-					goto no_default;
+					goto end_it;
 				}
 				xfree(warning);
 			}
 
 			user = xmalloc(sizeof(acct_user_rec_t));
-			user->assoc_list = list_create(NULL);
+			user->assoc_list = 
+				list_create(destroy_acct_association_rec);
 			user->name = xstrdup(name);
 			user->default_acct = xstrdup(default_acct);
 
-			if(add_qos_list && list_count(add_qos_list)) {
-				char *tmp_qos = NULL;
-				ListIterator qos_itr = 
-					list_iterator_create(add_qos_list);
-				user->qos_list = 
-					list_create(slurm_destroy_char);
-				while((tmp_qos = list_next(qos_itr))) {
-					list_append(user->qos_list,
-						    xstrdup(tmp_qos));
-				}
-				list_iterator_destroy(qos_itr);
-			}
-
 			user->admin_level = admin_level;
 			
 			xstrfmtcat(user_str, "  %s\n", name);
@@ -611,18 +920,40 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 						continue;
 					assoc = xmalloc(
 						sizeof(acct_association_rec_t));
+					init_acct_association_rec(assoc);
 					assoc->user = xstrdup(name);
 					assoc->acct = xstrdup(account);
 					assoc->cluster = xstrdup(cluster);
 					assoc->partition = xstrdup(partition);
-					assoc->fairshare = fairshare;
-					assoc->max_jobs = max_jobs;
-					assoc->max_nodes_per_job =
-						max_nodes_per_job;
-					assoc->max_wall_duration_per_job =
-						max_wall_duration_per_job;
-					assoc->max_cpu_secs_per_job =
-						max_cpu_secs_per_job;
+					
+					assoc->fairshare = 
+						start_assoc.fairshare;
+
+					assoc->grp_cpu_mins = 
+						start_assoc.grp_cpu_mins;
+					assoc->grp_cpus = start_assoc.grp_cpus;
+					assoc->grp_jobs = start_assoc.grp_jobs;
+					assoc->grp_nodes = 
+						start_assoc.grp_nodes;
+					assoc->grp_submit_jobs = 
+						start_assoc.grp_submit_jobs;
+					assoc->grp_wall = start_assoc.grp_wall;
+					
+					assoc->max_cpu_mins_pj = 
+						start_assoc.max_cpu_mins_pj;
+					assoc->max_cpus_pj = 
+						start_assoc.max_cpus_pj;
+					assoc->max_jobs = start_assoc.max_jobs;
+					assoc->max_nodes_pj = 
+						start_assoc.max_nodes_pj;
+					assoc->max_submit_jobs = 
+						start_assoc.max_submit_jobs;
+					assoc->max_wall_pj =
+						start_assoc.max_wall_pj;
+					
+					assoc->qos_list = copy_char_list(
+						start_assoc.qos_list);
+					
 					if(user) 
 						list_append(user->assoc_list,
 							    assoc);
@@ -648,16 +979,34 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 				}		
 			
 				assoc = xmalloc(sizeof(acct_association_rec_t));
+				init_acct_association_rec(assoc);
 				assoc->user = xstrdup(name);
 				assoc->acct = xstrdup(account);
 				assoc->cluster = xstrdup(cluster);
-				assoc->fairshare = fairshare;
-				assoc->max_jobs = max_jobs;
-				assoc->max_nodes_per_job = max_nodes_per_job;
-				assoc->max_wall_duration_per_job =
-					max_wall_duration_per_job;
-				assoc->max_cpu_secs_per_job =
-					max_cpu_secs_per_job;
+
+				assoc->fairshare = start_assoc.fairshare;
+
+				assoc->grp_cpu_mins = 
+					start_assoc.grp_cpu_mins;
+				assoc->grp_cpus = start_assoc.grp_cpus;
+				assoc->grp_jobs = start_assoc.grp_jobs;
+				assoc->grp_nodes = start_assoc.grp_nodes;
+				assoc->grp_submit_jobs = 
+					start_assoc.grp_submit_jobs;
+				assoc->grp_wall = start_assoc.grp_wall;
+				
+				assoc->max_cpu_mins_pj = 
+					start_assoc.max_cpu_mins_pj;
+				assoc->max_cpus_pj = start_assoc.max_cpus_pj;
+				assoc->max_jobs = start_assoc.max_jobs;
+				assoc->max_nodes_pj = start_assoc.max_nodes_pj;
+				assoc->max_submit_jobs = 
+					start_assoc.max_submit_jobs;
+				assoc->max_wall_pj = start_assoc.max_wall_pj;
+				
+				assoc->qos_list = 
+					copy_char_list(start_assoc.qos_list);
+
 				if(user) 
 					list_append(user->assoc_list, assoc);
 				else 
@@ -694,14 +1043,6 @@ no_default:
 		printf(" Adding User(s)\n%s", user_str);
 		printf(" Settings =\n");
 		printf("  Default Account = %s\n", default_acct);
-		if(add_qos_list) {
-			char *temp_char = get_qos_complete_str(
-				qos_list, add_qos_list);
-			if(temp_char) {		
-				printf("  Qos             = %s\n", temp_char);
-				xfree(temp_char);
-			}
-		}
 		
 		if(admin_level != ACCT_ADMIN_NOTSET)
 			printf("  Admin Level     = %s\n", 
@@ -715,36 +1056,10 @@ no_default:
 	}
 
 	if(limit_set) {
-		        printf(" Non Default Settings\n");
-		if(fairshare == INFINITE)
-			printf("  Fairshare       = NONE\n");
-		else if(fairshare != NO_VAL) 
-			printf("  Fairshare       = %u\n", fairshare);
-		
-		if(max_cpu_secs_per_job == INFINITE)
-			printf("  MaxCPUSecs      = NONE\n");
-		else if(max_cpu_secs_per_job != NO_VAL) 
-			printf("  MaxCPUSecs      = %u\n",
-			       max_cpu_secs_per_job);
-		
-		if(max_jobs == INFINITE) 
-			printf("  MaxJobs         = NONE\n");
-		else if(max_jobs != NO_VAL) 
-			printf("  MaxJobs         = %u\n", max_jobs);
-		
-		if(max_nodes_per_job == INFINITE)
-			printf("  MaxNodes        = NONE\n");
-		else if(max_nodes_per_job != NO_VAL)
-			printf("  MaxNodes        = %u\n", max_nodes_per_job);
-		
-		if(max_wall_duration_per_job == INFINITE) 
-			printf("  MaxWall         = NONE\n");		
-		else if(max_wall_duration_per_job != NO_VAL) {
-			char time_buf[32];
-			mins2time_str((time_t) max_wall_duration_per_job, 
-				      time_buf, sizeof(time_buf));
-			printf("  MaxWall         = %s\n", time_buf);
-		}
+		printf(" Non Default Settings\n");
+		sacctmgr_print_assoc_limits(&start_assoc);
+		if(start_assoc.qos_list)
+			list_destroy(start_assoc.qos_list);
 	}
 
 	notice_thread_init();
@@ -779,8 +1094,6 @@ no_default:
 	}
 
 end_it:
-	if(add_qos_list)
-		list_destroy(add_qos_list);
 	list_destroy(user_list);
 	list_destroy(assoc_list);
 	xfree(default_acct);
@@ -814,31 +1127,23 @@ extern int sacctmgr_add_coord(int argc, char *argv[])
 		return SLURM_ERROR;
 	}
 
+	if((_check_coord_request(user_cond, true) == SLURM_ERROR)
+	   || exit_code) {
+		destroy_acct_user_cond(user_cond);
+		return SLURM_ERROR;
+	}
+
 	itr = list_iterator_create(user_cond->assoc_cond->user_list);
 	while((name = list_next(itr))) {
 		xstrfmtcat(user_str, "  %s\n", name);
-
 	}
 	list_iterator_destroy(itr);
 
-	if(!user_str) {
-		exit_code=1;
-		fprintf(stderr, " You need to specify a user list here.\n"); 
-		destroy_acct_user_cond(user_cond);
-		return SLURM_ERROR;		
-	}
 	itr = list_iterator_create(user_cond->assoc_cond->acct_list);
 	while((name = list_next(itr))) {
 		xstrfmtcat(acct_str, "  %s\n", name);
-
 	}
 	list_iterator_destroy(itr);
-	if(!acct_str) {
-		exit_code=1;
-		fprintf(stderr, " You need to specify a account list here.\n"); 
-		destroy_acct_user_cond(user_cond);
-		return SLURM_ERROR;		
-	}
 
 	printf(" Adding Coordinator User(s)\n%s", user_str);
 	printf(" To Account(s) and all sub-accounts\n%s", acct_str);
@@ -892,10 +1197,18 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 		PRINT_COORDS,
 		PRINT_DACCT,
 		PRINT_FAIRSHARE,
+		PRINT_GRPCM,
+		PRINT_GRPC,
+		PRINT_GRPJ,
+		PRINT_GRPN,
+		PRINT_GRPS,
+		PRINT_GRPW,
 		PRINT_ID,
 		PRINT_MAXC,
+		PRINT_MAXCM,
 		PRINT_MAXJ,
 		PRINT_MAXN,
+		PRINT_MAXS,
 		PRINT_MAXW,
 		PRINT_QOS,
 		PRINT_QOS_RAW,
@@ -916,10 +1229,11 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 	}
 
 	if(!list_count(format_list)) {
-		slurm_addto_char_list(format_list, "U,D,Q,Ad");
+		slurm_addto_char_list(format_list, "U,D,Ad");
 		if(user_cond->with_assocs)
 			slurm_addto_char_list(format_list,
-					"Cl,Ac,Part,F,MaxC,MaxJ,MaxN,MaxW");
+					      "Cl,Ac,Part,F,"
+					      "MaxJ,MaxN,MaxS,MaxW,QOS");
 		if(user_cond->with_coords)
 			slurm_addto_char_list(format_list, "Coord");
 	}
@@ -939,6 +1253,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
 		if(!strncasecmp("Account", object, 2)) {
 			field->type = PRINT_ACCOUNT;
@@ -970,15 +1285,50 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 			field->name = xstrdup("FairShare");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpCPUMins", object, 8)) {
+			field->type = PRINT_GRPCM;
+			field->name = xstrdup("GrpCPUMins");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("GrpCPUs", object, 8)) {
+			field->type = PRINT_GRPC;
+			field->name = xstrdup("GrpCPUs");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpJobs", object, 4)) {
+			field->type = PRINT_GRPJ;
+			field->name = xstrdup("GrpJobs");
+			field->len = 7;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpNodes", object, 4)) {
+			field->type = PRINT_GRPN;
+			field->name = xstrdup("GrpNodes");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpSubmitJobs", object, 4)) {
+			field->type = PRINT_GRPS;
+			field->name = xstrdup("GrpSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("GrpWall", object, 4)) {
+			field->type = PRINT_GRPW;
+			field->name = xstrdup("GrpWall");
+			field->len = 11;
+			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("ID", object, 1)) {
 			field->type = PRINT_ID;
 			field->name = xstrdup("ID");
 			field->len = 6;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("MaxCPUSecs", object, 4)) {
-			field->type = PRINT_MAXC;
-			field->name = xstrdup("MaxCPUSecs");
+		} else if(!strncasecmp("MaxCPUMins", object, 7)) {
+			field->type = PRINT_MAXCM;
+			field->name = xstrdup("MaxCPUMins");
 			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("MaxCPUs", object, 7)) {
+			field->type = PRINT_MAXC;
+			field->name = xstrdup("MaxCPUs");
+			field->len = 8;
 			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxJobs", object, 4)) {
 			field->type = PRINT_MAXJ;
@@ -990,6 +1340,11 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 			field->name = xstrdup("MaxNodes");
 			field->len = 8;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("MaxSubmitJobs", object, 4)) {
+			field->type = PRINT_MAXS;
+			field->name = xstrdup("MaxSubmit");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
 		} else if(!strncasecmp("MaxWall", object, 4)) {
 			field->type = PRINT_MAXW;
 			field->name = xstrdup("MaxWall");
@@ -1027,6 +1382,11 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
@@ -1109,6 +1469,48 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 							(curr_inx == 
 							 field_count));
 						break;
+					case PRINT_GRPCM:
+						field->print_routine(
+							field,
+							assoc->grp_cpu_mins,
+							(curr_inx == 
+							 field_count));
+						break;
+					case PRINT_GRPC:
+						field->print_routine(
+							field,
+							assoc->grp_cpus,
+							(curr_inx == 
+							 field_count));
+						break;
+					case PRINT_GRPJ:
+						field->print_routine(
+							field, 
+							assoc->grp_jobs,
+							(curr_inx
+							 == field_count));
+						break;
+					case PRINT_GRPN:
+						field->print_routine(
+							field,
+							assoc->grp_nodes,
+							(curr_inx
+							 == field_count));
+						break;
+					case PRINT_GRPS:
+						field->print_routine(
+							field, 
+						assoc->grp_submit_jobs,
+							(curr_inx
+							 == field_count));
+						break;
+					case PRINT_GRPW:
+						field->print_routine(
+							field,
+							assoc->grp_wall,
+							(curr_inx
+							 == field_count));
+						break;
 					case PRINT_ID:
 						field->print_routine(
 							field,
@@ -1116,11 +1518,18 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 							(curr_inx == 
 							 field_count));
 						break;
-					case PRINT_MAXC:
+					case PRINT_MAXCM:
 						field->print_routine(
 							field,
 							assoc->
-							max_cpu_secs_per_job,
+							max_cpu_mins_pj,
+							(curr_inx == 
+							 field_count));
+						break;
+					case PRINT_MAXC:
+						field->print_routine(
+							field,
+							assoc->max_cpus_pj,
 							(curr_inx == 
 							 field_count));
 						break;
@@ -1135,15 +1544,22 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 						field->print_routine(
 							field,
 							assoc->
-							max_nodes_per_job,
+							max_nodes_pj,
 							(curr_inx == 
 							 field_count));
 						break;
+					case PRINT_MAXS:
+						field->print_routine(
+							field, 
+							assoc->max_submit_jobs,
+							(curr_inx ==
+							 field_count));
+						break;
 					case PRINT_MAXW:
 						field->print_routine(
 							field,
 							assoc->
-							max_wall_duration_per_job,
+							max_wall_pj,
 							(curr_inx == 
 							 field_count));
 						break;
@@ -1158,22 +1574,14 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 						field->print_routine(
 							field,
 							qos_list,
-							user->qos_list,
+							assoc->qos_list,
 							(curr_inx == 
 							 field_count));
 						break;
 					case PRINT_QOS_RAW:
-						if(!qos_list) {
-							qos_list = 
-								acct_storage_g_get_qos(
-									db_conn,
-									my_uid,
-									NULL);
-						}
 						field->print_routine(
 							field,
-							qos_list,
-							user->qos_list,
+							NULL,
 							(curr_inx == 
 							 field_count));
 						break;
@@ -1206,6 +1614,10 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 							 field_count));
 						break;
 					default:
+						field->print_routine(
+							field, NULL,
+							(curr_inx ==
+							 field_count));
 						break;
 					}
 					curr_inx++;
@@ -1218,109 +1630,55 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 			int curr_inx = 1;
 			while((field = list_next(itr2))) {
 				switch(field->type) {
+					/* All the association stuff */
 				case PRINT_ACCOUNT:
-					field->print_routine(
-						field, 
-						NULL,
-						(curr_inx == field_count));
-					break;
-				case PRINT_ADMIN:
-					field->print_routine(
-						field,
-						acct_admin_level_str(
-							user->admin_level),
-						(curr_inx == field_count));
-					break;
 				case PRINT_CLUSTER:
-					field->print_routine(
-						field,
-						NULL,
-						(curr_inx == field_count));
-					break;
-				case PRINT_COORDS:
-					field->print_routine(
-						field,
-						user->coord_accts,
-						(curr_inx == field_count));
-					break;
-				case PRINT_DACCT:
-					field->print_routine(
-						field,
-						user->default_acct,
-						(curr_inx == field_count));
-					break;
 				case PRINT_FAIRSHARE:
-					field->print_routine(
-						field,
-						NULL,
-						(curr_inx == field_count));
-					break;
+				case PRINT_GRPCM:
+				case PRINT_GRPC:
+				case PRINT_GRPJ:
+				case PRINT_GRPN:
+				case PRINT_GRPS:
+				case PRINT_GRPW:
 				case PRINT_ID:
-					field->print_routine(
-						field,
-						NULL,
-						(curr_inx == field_count));
-					break;
+				case PRINT_MAXCM:
 				case PRINT_MAXC:
-					field->print_routine(
-						field,
-						NULL,
-						(curr_inx == field_count));
-					break;
 				case PRINT_MAXJ:
-					field->print_routine(
-						field, 
-						NULL,
-						(curr_inx == field_count));
-					break;
 				case PRINT_MAXN:
-					field->print_routine(
-						field,
-						NULL,
-						(curr_inx == field_count));
-					break;
+				case PRINT_MAXS:
 				case PRINT_MAXW:
+				case PRINT_QOS_RAW:
+				case PRINT_PID:
+				case PRINT_PNAME:
+				case PRINT_PART:
 					field->print_routine(
-						field,
+						field, 
 						NULL,
 						(curr_inx == field_count));
 					break;
 				case PRINT_QOS:
-					if(!qos_list) {
-						qos_list = 
-							acct_storage_g_get_qos(
-								db_conn,
-								my_uid,
-								NULL);
-					}
 					field->print_routine(
-						field, qos_list,
-						user->qos_list,
+						field, NULL,
+						NULL,
 						(curr_inx == field_count));
 					break;
-				case PRINT_QOS_RAW:
-					if(!qos_list) {
-						qos_list = 
-							acct_storage_g_get_qos(
-								db_conn,
-								my_uid,
-								NULL);
-					}
+				case PRINT_ADMIN:
 					field->print_routine(
-						field, qos_list,
-						user->qos_list,
+						field,
+						acct_admin_level_str(
+							user->admin_level),
 						(curr_inx == field_count));
 					break;
-				case PRINT_PID:
+				case PRINT_COORDS:
 					field->print_routine(
 						field,
-						NULL,
+						user->coord_accts,
 						(curr_inx == field_count));
 					break;
-				case PRINT_PART:
+				case PRINT_DACCT:
 					field->print_routine(
-						field, 
-						NULL,
+						field,
+						user->default_acct,
 						(curr_inx == field_count));
 					break;
 				case PRINT_USER:
@@ -1330,6 +1688,9 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 						(curr_inx == field_count));
 					break;
 				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
 					break;
 				}
 			curr_inx++;
@@ -1357,11 +1718,7 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 	int cond_set = 0, rec_set = 0, set = 0;
 	List ret_list = NULL;
 
-	assoc->fairshare = NO_VAL;
-	assoc->max_cpu_secs_per_job = NO_VAL;
-	assoc->max_jobs = NO_VAL;
-	assoc->max_nodes_per_job = NO_VAL;
-	assoc->max_wall_duration_per_job = NO_VAL;
+	init_acct_association_rec(assoc);
 
 	for (i=0; i<argc; i++) {
 		if (!strncasecmp (argv[i], "Where", 5)) {
@@ -1418,6 +1775,9 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 				   " You specified Accounts in your "
 				   "request.  Did you mean "
 				   "DefaultAccounts?\n")) {
+				if(!user_cond->def_acct_list)
+					user_cond->def_acct_list = 
+						list_create(slurm_destroy_char);
 				list_transfer(user_cond->def_acct_list,
 					      user_cond->assoc_cond->acct_list);
 			}
@@ -1594,6 +1954,12 @@ extern int sacctmgr_delete_coord(int argc, char *argv[])
 		destroy_acct_user_cond(user_cond);
 		return SLURM_ERROR;
 	}
+	if((_check_coord_request(user_cond, false) == SLURM_ERROR)
+	   || exit_code) {
+		destroy_acct_user_cond(user_cond);
+		return SLURM_ERROR;
+	}
+
 	if(user_cond->assoc_cond->user_list) {	
 		itr = list_iterator_create(user_cond->assoc_cond->user_list);
 		while((name = list_next(itr))) {
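
With the _check_coord_request() validation added above, "add coordinator" and "delete coordinator" now fail up front when a named account or user is missing from the database, instead of proceeding with an incomplete request. An illustrative invocation, using the options from the help text; the account and user names are made up:

    sacctmgr add coordinator accounts=research names=alice
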
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index 6af234e29..46c18f258 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -795,7 +795,7 @@ void set_options(const int argc, char **argv)
 			opt.conn_type = verify_conn_type(optarg);
 			break;
 		case LONG_OPT_BEGIN:
-			opt.begin = parse_time(optarg);
+			opt.begin = parse_time(optarg, 0);
 			if (opt.begin == 0) {
 				fatal("Invalid time specification %s",
 				      optarg);
@@ -985,6 +985,32 @@ static char *_get_shell(void)
 	return pw_ent_ptr->pw_shell;
 }
 
+static int _salloc_default_command (int *argcp, char **argvp[])
+{
+	slurm_ctl_conf_t *cf = slurm_conf_lock();
+
+	if (cf->salloc_default_command) {
+		/*
+		 *  Set argv to "/bin/sh -c 'salloc_default_command'"
+		 */
+		*argcp = 3;
+		*argvp = xmalloc (sizeof (char *) * 4);
+		(*argvp)[0] = "/bin/sh";
+		(*argvp)[1] = "-c";
+		(*argvp)[2] = xstrdup (cf->salloc_default_command);
+		(*argvp)[3] = NULL;
+	}
+	else {
+		*argcp = 1;
+		*argvp = xmalloc (sizeof (char *) * 2);
+		(*argvp)[0] = _get_shell ();
+		(*argvp)[1] = NULL;
+	}
+
+	slurm_conf_unlock();
+	return (0);
+}
+
 /* 
  * _opt_verify : perform some post option processing verification
  *
@@ -1004,14 +1030,14 @@ static bool _opt_verify(void)
 	if ((opt.job_name == NULL) && (command_argc > 0))
 		opt.job_name = base_name(command_argv[0]);
 
-	if ((opt.no_shell == false) && (command_argc == 0)) {
-		/* Using default shell as the user command */
-		command_argc = 1;
-		command_argv = (char **) xmalloc(sizeof(char *) * 2);
-		command_argv[0] = _get_shell();
-		command_argv[1] = NULL;
-	}
+	if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid)) 
+		opt.uid = opt.euid;
+
+	if ((opt.egid != (gid_t) -1) && (opt.egid != opt.gid)) 
+		opt.gid = opt.egid;
 
+	if ((opt.no_shell == false) && (command_argc == 0))
+		_salloc_default_command (&command_argc, &command_argv);
 
 	/* check for realistic arguments */
 	if (opt.nprocs <= 0) {
@@ -1057,7 +1083,8 @@ static bool _opt_verify(void)
 				info("Too few processes ((n/plane_size) %d < N %d) "
 				     "and ((N-1)*(plane_size) %d >= n %d)) ",
 				     opt.nprocs/opt.plane_size, opt.min_nodes, 
-				     (opt.min_nodes-1)*opt.plane_size, opt.nprocs);
+				     (opt.min_nodes-1)*opt.plane_size, 
+				     opt.nprocs);
 #endif
 				error("Too few processes for the requested "
 				      "{plane,node} distribution");
@@ -1140,12 +1167,6 @@ static bool _opt_verify(void)
 			opt.time_limit = INFINITE;
 	}
 
-	if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid)) 
-		opt.uid = opt.euid;
-
-	if ((opt.egid != (gid_t) -1) && (opt.egid != opt.gid)) 
-		opt.gid = opt.egid;
-
 	if (opt.immediate) {
 		char *sched_name = slurm_get_sched_type();
 		if (strcmp(sched_name, "sched/wiki") == 0) {
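
The salloc/opt.c change above replaces the hard-coded fallback shell with _salloc_default_command(), which runs the administrator-configured command (wrapped in /bin/sh -c) and only falls back to the user's shell when nothing is configured. Assuming the slurm_ctl_conf_t field salloc_default_command corresponds to a SallocDefaultCommand entry in slurm.conf, which is an inference from the field name rather than something this hunk shows, the setup might look like:

    # slurm.conf (hypothetical entry; parameter name inferred from the config field)
    SallocDefaultCommand=/bin/bash

    # salloc with no trailing command now runs the configured default via /bin/sh -c
    salloc -N2
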
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index c1a482583..dbe87b405 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -191,15 +191,6 @@ int main(int argc, char *argv[])
 		slurm_allocation_msg_thr_destroy(msg_thr);
 		exit(1);
 	}
-	after = time(NULL);
-
-	xsignal(SIGHUP, _exit_on_signal);
-	xsignal(SIGINT, _ignore_signal);
-	xsignal(SIGQUIT, _ignore_signal);
-	xsignal(SIGPIPE, _ignore_signal);
-	xsignal(SIGTERM, _ignore_signal);
-	xsignal(SIGUSR1, _ignore_signal);
-	xsignal(SIGUSR2, _ignore_signal);
 
 	/*
 	 * Allocation granted!
@@ -212,6 +203,16 @@ int main(int argc, char *argv[])
 	}
 
 #endif
+	after = time(NULL);
+
+	xsignal(SIGHUP, _exit_on_signal);
+	xsignal(SIGINT, _ignore_signal);
+	xsignal(SIGQUIT, _ignore_signal);
+	xsignal(SIGPIPE, _ignore_signal);
+	xsignal(SIGTERM, _ignore_signal);
+	xsignal(SIGUSR1, _ignore_signal);
+	xsignal(SIGUSR2, _ignore_signal);
+
 	if (opt.bell == BELL_ALWAYS
 	    || (opt.bell == BELL_AFTER_DELAY
 		&& ((after - before) > DEFAULT_BELL_DELAY))) {
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index 946c75802..b3e18bef0 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -1175,7 +1175,7 @@ static void _set_options(int argc, char **argv)
 			opt.conn_type = verify_conn_type(optarg);
 			break;
 		case LONG_OPT_BEGIN:
-			opt.begin = parse_time(optarg);
+			opt.begin = parse_time(optarg, 0);
 			if (opt.begin == 0) {
 				fatal("Invalid time specification %s",
 				      optarg);
@@ -1319,7 +1319,6 @@ static void _set_options(int argc, char **argv)
 		case LONG_OPT_NETWORK:
 			xfree(opt.network);
 			opt.network = xstrdup(optarg);
-			setenv("SLURM_NETWORK", opt.network, 1);
 			break;
 		default:
 			fatal("Unrecognized command line parameter %c",
@@ -1391,7 +1390,7 @@ static void _set_pbs_options(int argc, char **argv)
 	      != -1) {
 		switch (opt_char) {
 		case 'a':
-			opt.begin = parse_time(optarg);			
+			opt.begin = parse_time(optarg, 0);			
 			break;
 		case 'A':
 			xfree(opt.account);
@@ -1738,6 +1737,8 @@ static bool _opt_verify(void)
 
 	if ((opt.job_name == NULL) && (opt.script_argc > 0))
 		opt.job_name = base_name(opt.script_argv[0]);
+	if (opt.job_name)
+		setenv("SLURM_JOB_NAME", opt.job_name, 0);
 
 	/* check for realistic arguments */
 	if (opt.nprocs <= 0) {
@@ -1894,15 +1895,16 @@ static bool _opt_verify(void)
 		error( "--propagate=%s is not valid.", opt.propagate );
 		verified = false;
 	}
+	if (opt.dependency)
+		setenvfs("SLURM_JOB_DEPENDENCY=%s", opt.dependency);
 
 	if (opt.acctg_freq >= 0)
 		setenvf(NULL, "SLURM_ACCTG_FREQ", "%d", opt.acctg_freq); 
 
 #ifdef HAVE_AIX
-	if (opt.network == NULL) {
+	if (opt.network == NULL)
 		opt.network = "us,sn_all,bulk_xfer";
-		setenv("SLURM_NETWORK", opt.network, 1);
-	}
+	setenv("SLURM_NETWORK", opt.network, 1);
 #endif
 
 	return verified;
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index 31d8f7134..70e39c54f 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  sbatch.c - Submit a SLURM batch script.
  *
- *  $Id: sbatch.c 14958 2008-09-03 17:27:21Z jette $
+ *  $Id: sbatch.c 15034 2008-09-09 20:24:34Z jette $
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -406,7 +406,8 @@ static void *get_script_buffer(const char *filename, int *size)
 		ptr = buf + script_size;
 		buf_left = buf_size - script_size;
 	}
-	close(fd);
+	if (filename)
+		close(fd);
 
 	/*
 	 * Finally we perform some sanity tests on the script.
diff --git a/src/scontrol/update_job.c b/src/scontrol/update_job.c
index b981d761d..6a68a73bc 100644
--- a/src/scontrol/update_job.c
+++ b/src/scontrol/update_job.c
@@ -452,7 +452,7 @@ scontrol_update_job (int argc, char *argv[])
 			update_cnt++;
 		}
 		else if (strncasecmp(argv[i], "StartTime=", 10) == 0) {
-			job_msg.begin_time = parse_time(&argv[i][10]);
+			job_msg.begin_time = parse_time(&argv[i][10], 0);
 			update_cnt++;
 		}
 		else {
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index 75361fbc1..e5e068ea8 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -68,13 +68,17 @@ static bool _valid_job_assoc(struct job_record *job_ptr)
 
 	assoc_ptr = job_ptr->assoc_ptr;
 	if ((assoc_ptr == NULL) ||
-	    (assoc_ptr-> id != job_ptr->assoc_id) ||
+	    (assoc_ptr->id  != job_ptr->assoc_id) ||
 	    (assoc_ptr->uid != job_ptr->user_id)) {
 		error("Invalid assoc_ptr for jobid=%u", job_ptr->job_id);
 		bzero(&assoc_rec, sizeof(acct_association_rec_t));
-		assoc_rec.uid       = job_ptr->user_id;
-		assoc_rec.partition = job_ptr->partition;
-		assoc_rec.acct      = job_ptr->account;
+		if(job_ptr->assoc_id)
+			assoc_rec.id = job_ptr->assoc_id;
+		else {
+			assoc_rec.uid       = job_ptr->user_id;
+			assoc_rec.partition = job_ptr->partition;
+			assoc_rec.acct      = job_ptr->account;
+		}
 		if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 					    accounting_enforce, &assoc_ptr)) {
 			info("_validate_job_assoc: invalid account or "
@@ -94,13 +98,17 @@ static bool _valid_job_assoc(struct job_record *job_ptr)
  */
 extern void acct_policy_job_begin(struct job_record *job_ptr)
 {
-	acct_association_rec_t *assoc_ptr;
+	acct_association_rec_t *assoc_ptr = NULL;
 
 	if (!accounting_enforce || !_valid_job_assoc(job_ptr))
 		return;
 
 	assoc_ptr = job_ptr->assoc_ptr;
-	assoc_ptr->used_jobs++;
+	while(assoc_ptr) {
+		assoc_ptr->used_jobs++;	
+		/* now handle all the group limits of the parents */
+		assoc_ptr = assoc_ptr->parent_assoc_ptr;
+	}
 }
 
 /*
@@ -109,16 +117,19 @@ extern void acct_policy_job_begin(struct job_record *job_ptr)
  */
 extern void acct_policy_job_fini(struct job_record *job_ptr)
 {
-	acct_association_rec_t *assoc_ptr;
+	acct_association_rec_t *assoc_ptr = NULL;
 
 	if (!accounting_enforce || !_valid_job_assoc(job_ptr))
 		return;
 
 	assoc_ptr = job_ptr->assoc_ptr;
-	if (assoc_ptr->used_jobs)
-		assoc_ptr->used_jobs--;
-	else
-		error("acct_policy_job_fini: used_jobs underflow");
+	while(assoc_ptr) {
+		if (assoc_ptr->used_jobs)
+			assoc_ptr->used_jobs--;
+		else
+			debug2("acct_policy_job_fini: used_jobs underflow");
+		assoc_ptr = assoc_ptr->parent_assoc_ptr;
+	}
 }
 
 /*
@@ -132,7 +143,11 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 {
 	acct_association_rec_t *assoc_ptr;
 	uint32_t time_limit;
+	int parent = 0; /*flag to tell us if we are looking at the
+			 * parent or not 
+			 */
 
+	/* check to see if we are enforcing associations */
 	if (!accounting_enforce)
 		return true;
 
@@ -141,50 +156,147 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 		return false;
 	}
 
+	/* now see if we are enforcing limits */
+	if (accounting_enforce != ACCOUNTING_ENFORCE_WITH_LIMITS)
+		return true;
+
 	assoc_ptr = job_ptr->assoc_ptr;
+	while(assoc_ptr) {
 #if _DEBUG
-	info("acct_job_limits: %u of %u", 
-	     assoc_ptr->used_jobs, assoc_ptr->max_jobs);
-#endif
+		info("acct_job_limits: %u of %u", 
+		     assoc_ptr->used_jobs, assoc_ptr->max_jobs);
+#endif		
+		/* NOTE: We can't enforce assoc_ptr->grp_cpu_mins at this
+		 * time because we aren't keeping track of how long
+		 * jobs have been running yet */
 
-	if ((assoc_ptr->max_jobs != NO_VAL) &&
-	    (assoc_ptr->max_jobs != INFINITE) &&
-	    (assoc_ptr->used_jobs >= assoc_ptr->max_jobs)) {
-		job_ptr->state_reason = WAIT_ASSOC_LIMIT;
-		return false;
-	}
+		/* NOTE: We can't enforce assoc_ptr->grp_cpus at this
+		 * time because we don't have access to a CPU count for the job
+		 * due to how all of the job's specifications interact */
 
-	/* if the association limits have changed since job
-	 * submission and job can not run, then kill it */
-	if ((assoc_ptr->max_wall_duration_per_job != NO_VAL) &&
-	    (assoc_ptr->max_wall_duration_per_job != INFINITE)) {
-		time_limit = assoc_ptr->max_wall_duration_per_job;
-		if ((job_ptr->time_limit != NO_VAL) &&
-		    (job_ptr->time_limit > time_limit)) {
-			info("job %u being cancelled, time limit exceeds "
-			     "account max (%u > %u)",
-			     job_ptr->job_id, job_ptr->time_limit, time_limit);
-			_cancel_job(job_ptr);
+		if ((assoc_ptr->grp_jobs != NO_VAL) &&
+		    (assoc_ptr->grp_jobs != INFINITE) &&
+		    (assoc_ptr->used_jobs >= assoc_ptr->grp_jobs)) {
+			job_ptr->state_reason = WAIT_ASSOC_LIMIT;
 			return false;
 		}
-	}
+		
+		if ((assoc_ptr->grp_nodes != NO_VAL) &&
+		    (assoc_ptr->grp_nodes != INFINITE)) {
+			if (job_ptr->details->min_nodes > 
+			    assoc_ptr->grp_nodes) {
+				info("job %u being cancelled, "
+				     "min node request %u exceeds "
+				     "group max node limit %u for account %s",
+				     job_ptr->job_id, 
+				     job_ptr->details->min_nodes, 
+				     assoc_ptr->grp_nodes, assoc_ptr->acct);
+				_cancel_job(job_ptr);
+			} else if ((assoc_ptr->grp_used_nodes + 
+				    job_ptr->details->min_nodes) > 
+				   assoc_ptr->grp_nodes) {
+				job_ptr->state_reason = WAIT_ASSOC_LIMIT;
+				return false;
+			}
+		}
+
+		/* we don't need to check submit_jobs here */
+		
+		/* FIX ME: Once we start tracking time of running jobs
+		 * we will need to update the amount of time we have
+		 * used and check against that here.  When we start
+		 * keeping track of time we will also need to come up
+		 * with a way to refresh the time. 
+		 */
+		if ((assoc_ptr->grp_wall != NO_VAL) &&
+		    (assoc_ptr->grp_wall != INFINITE)) {
+			time_limit = assoc_ptr->grp_wall;
+			if ((job_ptr->time_limit != NO_VAL) &&
+			    (job_ptr->time_limit > time_limit)) {
+				info("job %u being cancelled, "
+				     "time limit %u exceeds group "
+				     "time limit %u for account %s",
+				     job_ptr->job_id, job_ptr->time_limit, 
+				     time_limit, assoc_ptr->acct);
+				_cancel_job(job_ptr);
+				return false;
+			}
+		}
 
-	if ((assoc_ptr->max_nodes_per_job != NO_VAL) &&
-	    (assoc_ptr->max_nodes_per_job != INFINITE)) {
-		if (job_ptr->details->min_nodes > 
-		    assoc_ptr->max_nodes_per_job) {
-			info("job %u being cancelled,  min node limit exceeds "
-			     "account max (%u > %u)",
-			     job_ptr->job_id, job_ptr->details->min_nodes, 
-			     assoc_ptr->max_nodes_per_job);
-			_cancel_job(job_ptr);
+		
+		/* We don't need to look at the regular limits for
+		 * parents since we have pre-propagated them, so just
+		 * continue with the next parent
+		 */
+		if(parent) {
+			assoc_ptr = assoc_ptr->parent_assoc_ptr;
+			continue;
+		} 
+		
+		/* NOTE: We can't enforce assoc_ptr->max_cpu_mins_pj at this
+		 * time because we don't have access to a CPU count for the job
+		 * due to how all of the job's specifications interact */
+		
+		/* NOTE: We can't enforce assoc_ptr->max_cpus at this
+		 * time because we don't have access to a CPU count for the job
+		 * due to how all of the job's specifications interact */
+
+		if ((assoc_ptr->max_jobs != NO_VAL) &&
+		    (assoc_ptr->max_jobs != INFINITE) &&
+		    (assoc_ptr->used_jobs >= assoc_ptr->max_jobs)) {
+			job_ptr->state_reason = WAIT_ASSOC_LIMIT;
 			return false;
 		}
-	}
+		
+		if ((assoc_ptr->max_nodes_pj != NO_VAL) &&
+		    (assoc_ptr->max_nodes_pj != INFINITE)) {
+			if (job_ptr->details->min_nodes > 
+			    assoc_ptr->max_nodes_pj) {
+				info("job %u being cancelled, "
+				     "min node limit %u exceeds "
+				     "account max %u",
+				     job_ptr->job_id,
+				     job_ptr->details->min_nodes, 
+				     assoc_ptr->max_nodes_pj);
+				_cancel_job(job_ptr);
+				return false;
+			}
+		}
 
-	/* NOTE: We can't enforce assoc_ptr->max_cpu_secs_per_job at this
-	 * time because we don't have access to a CPU count for the job
-	 * due to how all of the job's specifications interact */
+		/* we don't need to check submit_jobs here */
+
+		/* if the association limits have changed since job
+		 * submission and job can not run, then kill it */
+		if ((assoc_ptr->max_wall_pj != NO_VAL) &&
+		    (assoc_ptr->max_wall_pj != INFINITE)) {
+			time_limit = assoc_ptr->max_wall_pj;
+			if ((job_ptr->time_limit != NO_VAL) &&
+			    (job_ptr->time_limit > time_limit)) {
+				info("job %u being cancelled, "
+				     "time limit %u exceeds account max %u",
+				     job_ptr->job_id, job_ptr->time_limit, 
+				     time_limit);
+				_cancel_job(job_ptr);
+				return false;
+			}
+		}		
+	
+		assoc_ptr = assoc_ptr->parent_assoc_ptr;
+		parent = 1;
+	}
 
 	return true;
 }
+
+/* FIX ME: This function should be called every so often to update time, and
+ * shares used.  It doesn't do anything right now.
+ */
+extern void acct_policy_update_running_job_usage(struct job_record *job_ptr)
+{
+	acct_association_rec_t *assoc_ptr;
+	assoc_ptr = job_ptr->assoc_ptr;
+	while(assoc_ptr) {
+
+		assoc_ptr = assoc_ptr->parent_assoc_ptr;
+	}
+}
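The acct_policy.c hunks above replace single-association bookkeeping with a walk up the parent_assoc_ptr chain, so usage is charged to, and group limits are checked against, every ancestor account. A stripped-down sketch of that traversal, using a simplified stand-in for acct_association_rec_t (field names and limits below are illustrative only):

#include <stdio.h>

/* simplified stand-in for acct_association_rec_t */
struct assoc {
	const char   *acct;
	unsigned int  grp_jobs;		/* 0 means "no limit" in this sketch */
	unsigned int  used_jobs;
	struct assoc *parent;
};

/* charge a started job to the association and every parent */
static void job_begin(struct assoc *a)
{
	for (; a; a = a->parent)
		a->used_jobs++;
}

/* return 1 if one more job stays within all group limits up the chain */
static int job_runnable(const struct assoc *a)
{
	for (; a; a = a->parent) {
		if (a->grp_jobs && (a->used_jobs >= a->grp_jobs))
			return 0;
	}
	return 1;
}

int main(void)
{
	struct assoc root = { "root",    0, 0, NULL  };
	struct assoc phys = { "physics", 2, 0, &root };
	struct assoc user = { "alice",   0, 0, &phys };

	while (job_runnable(&user)) {
		job_begin(&user);
		printf("started a job; physics used_jobs=%u\n",
		       phys.used_jobs);
	}
	printf("blocked by group job limit on account %s\n", phys.acct);
	return 0;
}

With those numbers the loop admits two jobs and then stops, which is the point at which the patched acct_policy_job_runnable() would set WAIT_ASSOC_LIMIT.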
diff --git a/src/slurmctld/acct_policy.h b/src/slurmctld/acct_policy.h
index cc9c16a62..aaaee9411 100644
--- a/src/slurmctld/acct_policy.h
+++ b/src/slurmctld/acct_policy.h
@@ -59,4 +59,9 @@ extern void acct_policy_job_fini(struct job_record *job_ptr);
  */
 extern bool acct_policy_job_runnable(struct job_record *job_ptr);
 
+/* FIX ME: This function should be called every so often to update time, and
+ * shares used.  It doesn't do anything right now.
+ */
+extern void acct_policy_update_running_job_usage(struct job_record *job_ptr);
+
 #endif /* !_HAVE_ACCT_POLICY_H */
diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c
index 64d20e36e..73f77fc03 100644
--- a/src/slurmctld/agent.c
+++ b/src/slurmctld/agent.c
@@ -984,8 +984,8 @@ cleanup:
 						 thread_ptr->start_time);
 	/* Signal completion so another thread can replace us */
 	(*threads_active_ptr)--;
-	slurm_mutex_unlock(thread_mutex_ptr);
 	pthread_cond_signal(thread_cond_ptr);
+	slurm_mutex_unlock(thread_mutex_ptr);
 	return (void *) NULL;
 }
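The agent.c hunk above reorders the thread-completion path so pthread_cond_signal() is issued before slurm_mutex_unlock(), i.e. the signal is sent while the mutex protecting the active-thread count is still held and the waiter always re-checks the count under that same lock. A minimal self-contained sketch of the pattern (the names and the thread limit are made up for illustration):

#include <pthread.h>
#include <stdio.h>

#define MAX_ACTIVE 2

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int active = 0;

static void *worker(void *arg)
{
	(void) arg;
	/* ... real work would happen here ... */

	pthread_mutex_lock(&lock);
	active--;
	/* signal completion while still holding the mutex, then unlock */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	int i;

	for (i = 0; i < 4; i++) {
		pthread_mutex_lock(&lock);
		while (active >= MAX_ACTIVE)	/* throttle concurrency */
			pthread_cond_wait(&cond, &lock);
		active++;
		pthread_mutex_unlock(&lock);
		pthread_create(&tid[i], NULL, worker, NULL);
	}
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	printf("all workers done, active=%d\n", active);
	return 0;
}

Build with -pthread; the throttling loop in main() plays the role of the agent's wait for a free thread slot.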
 
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index e11631ae8..dd6f5c3e8 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -143,6 +143,7 @@ int bg_recover = DEFAULT_RECOVER;
 char *slurmctld_cluster_name = NULL; /* name of cluster */
 void *acct_db_conn = NULL;
 int accounting_enforce = 0;
+int association_based_accounting = 0;
 bool ping_nodes_now = false;
 
 /* Local variables */
@@ -302,15 +303,31 @@ int main(int argc, char *argv[])
 	 * memory, it will report 'HashBase' if it is not duped
 	 */
 	slurmctld_cluster_name = xstrdup(slurmctld_conf.cluster_name);
+	association_based_accounting =
+		slurm_get_is_association_based_accounting();
 	accounting_enforce = slurmctld_conf.accounting_storage_enforce;
-	acct_db_conn = acct_storage_g_get_connection(true, false);
+	acct_db_conn = acct_storage_g_get_connection(true, 0, false);
+
+	memset(&assoc_init_arg, 0, sizeof(assoc_init_args_t));
 	assoc_init_arg.enforce = accounting_enforce;
 	assoc_init_arg.remove_assoc_notify = _remove_assoc;
-	if (assoc_mgr_init(acct_db_conn, &assoc_init_arg) &&
-	    accounting_enforce) {
-		error("assoc_mgr_init failure");
-		fatal("slurmdbd and/or database must be up at "
-		      "slurmctld start time");
+	assoc_init_arg.cache_level = ASSOC_MGR_CACHE_ALL;
+
+	if (assoc_mgr_init(acct_db_conn, &assoc_init_arg)) {
+		if(accounting_enforce) 
+			error("Association database appears down, "
+			      "reading from state file.");
+		else
+			debug("Association database appears down, "
+			      "reading from state file.");
+			
+		if ((load_assoc_mgr_state(slurmctld_conf.state_save_location)
+		     != SLURM_SUCCESS) && accounting_enforce) {
+			error("Unable to get any information from "
+			      "the state file");
+			fatal("slurmdbd and/or database must be up at "
+			      "slurmctld start time");
+		}
 	}
 
 	info("slurmctld version %s started on cluster %s",
@@ -337,16 +354,12 @@ int main(int argc, char *argv[])
 	/*
 	 * Initialize plugins.
 	 */
-	if ( slurm_select_init() != SLURM_SUCCESS )
+	if (slurm_select_init() != SLURM_SUCCESS )
 		fatal( "failed to initialize node selection plugin" );
-	if ( checkpoint_init(slurmctld_conf.checkpoint_type) != 
-			SLURM_SUCCESS )
+	if (checkpoint_init(slurmctld_conf.checkpoint_type) != SLURM_SUCCESS )
 		fatal( "failed to initialize checkpoint plugin" );
-	if (slurm_select_init() != SLURM_SUCCESS )
-		fatal( "failed to initialize node selection plugin");
 	if (slurm_acct_storage_init(NULL) != SLURM_SUCCESS )
 		fatal( "failed to initialize accounting_storage plugin");
-
 	if (slurm_jobacct_gather_init() != SLURM_SUCCESS )
 		fatal( "failed to initialize jobacct_gather plugin");
 
@@ -396,9 +409,14 @@ int main(int argc, char *argv[])
 
 		if(!acct_db_conn) {
 			acct_db_conn = 
-				acct_storage_g_get_connection(true, false);
-			if (assoc_mgr_init(acct_db_conn, &assoc_init_arg) &&
-			    accounting_enforce) {
+				acct_storage_g_get_connection(true, 0, false);
+			/* We only send in a variable the first time
+			   we call this since we are setting up static
+			   variables inside the function; sending a
+			   NULL will just use those set before.
+			*/
+			if (assoc_mgr_init(acct_db_conn, NULL) &&
+			    accounting_enforce && !running_cache) {
 				error("assoc_mgr_init failure");
 				fatal("slurmdbd and/or database must be up at "
 				      "slurmctld start time");
@@ -406,6 +424,10 @@ int main(int argc, char *argv[])
 		}
 
 		info("Running as primary controller");
+		clusteracct_storage_g_register_ctld(
+			slurmctld_cluster_name, 
+			slurmctld_conf.slurmctld_port);
+		
 		_accounting_cluster_ready();
 		if (slurm_sched_init() != SLURM_SUCCESS)
 			fatal("failed to initialize scheduling plugin");
@@ -422,10 +444,6 @@ int main(int argc, char *argv[])
 			fatal("pthread_create error %m");
 		slurm_attr_destroy(&thread_attr);
 
-		clusteracct_storage_g_register_ctld(
-			slurmctld_conf.cluster_name, 
-			slurmctld_conf.slurmctld_port);
-		
 		/*
 		 * create attached thread for signal handling
 		 */
@@ -508,6 +526,7 @@ int main(int argc, char *argv[])
 	part_fini();	/* part_fini() must preceed node_fini() */
 	node_fini();
 	trigger_fini();
+	assoc_mgr_fini(slurmctld_conf.state_save_location);
 
 	/* Plugins are needed to purge job/node data structures,
 	 * unplug after other data structures are purged */
@@ -519,7 +538,6 @@ int main(int argc, char *argv[])
 	checkpoint_fini();
 	slurm_auth_fini();
 	switch_fini();
-	assoc_mgr_fini();
 
 	/* purge remaining data structures */
 	slurm_cred_ctx_destroy(slurmctld_config.cred_ctx);
@@ -917,6 +935,14 @@ static int _accounting_cluster_ready()
 	rc = clusteracct_storage_g_cluster_procs(acct_db_conn,
 						 slurmctld_cluster_name,
 						 procs, event_time);
+	if(rc == ACCOUNTING_FIRST_REG) {
+		/* see if we are running directly to a database
+		 * instead of a slurmdbd.
+		 */
+		send_jobs_to_accounting(event_time);
+		send_nodes_to_accounting(event_time);
+		rc = SLURM_SUCCESS;
+	}
 
 	return rc;
 }
@@ -1211,6 +1237,7 @@ void save_all_state(void)
 	schedule_node_save();
 	schedule_trigger_save();
 	select_g_state_save(slurmctld_conf.state_save_location);
+	dump_assoc_mgr_state(slurmctld_conf.state_save_location);
 }
 
 /* 
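The controller.c startup change above makes an assoc_mgr_init() failure non-fatal at first: slurmctld tries to load cached associations from the state save location and only aborts when enforcement is enabled and the state file is unusable too. A rough sketch of that decision ladder (the function names below are placeholders, not the real assoc_mgr API):

#include <stdio.h>
#include <stdbool.h>

/* placeholders for the real initialization paths */
static int init_from_database(void)   { return -1; /* pretend the DB is down */ }
static int init_from_state_file(void) { return 0;  /* pretend the cache loads */ }

static int init_associations(bool enforce)
{
	if (init_from_database() == 0)
		return 0;

	fprintf(stderr, "association database appears down, "
		"reading from state file\n");

	if (init_from_state_file() == 0)
		return 0;

	if (enforce) {
		fprintf(stderr, "fatal: slurmdbd and/or database must be up "
			"at start time\n");
		return -1;
	}
	/* without enforcement we can start with no association data at all */
	return 0;
}

int main(void)
{
	return init_associations(true) ? 1 : 0;
}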
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index d0d2dee8a..c945e97d3 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -106,6 +106,10 @@ static bool     wiki_sched_test = false;
 
 /* Local functions */
 static void _add_job_hash(struct job_record *job_ptr);
+
+static void _acct_add_job_submit(struct job_record *job_ptr);
+static void _acct_remove_job_submit(struct job_record *job_ptr);
+
 static int  _copy_job_desc_to_file(job_desc_msg_t * job_desc,
 				   uint32_t job_id);
 static int  _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
@@ -737,9 +741,19 @@ static int _load_job_state(Buf buffer)
 	job_ptr->user_id      = user_id;
 
 	bzero(&assoc_rec, sizeof(acct_association_rec_t));
-	assoc_rec.acct      = job_ptr->account;
-	assoc_rec.partition = job_ptr->partition;
-	assoc_rec.uid       = job_ptr->user_id;
+
+	/* 
+	 * For speed and accuracy we will first see if we once had an
+	 * association record.  If not, look it up by
+	 * account, partition, and user_id.
+	 */
+	if(job_ptr->assoc_id)
+		assoc_rec.id = job_ptr->assoc_id;
+	else {
+		assoc_rec.acct      = job_ptr->account;
+		assoc_rec.partition = job_ptr->partition;
+		assoc_rec.uid       = job_ptr->user_id;
+	}
 
 	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 				    accounting_enforce,
@@ -752,14 +766,29 @@ static int _load_job_state(Buf buffer)
 		if (IS_JOB_PENDING(job_ptr))
 			job_ptr->start_time = now;
 		job_ptr->end_time = now;
-		jobacct_storage_g_job_complete(acct_db_conn, job_ptr);
+		if(job_ptr->assoc_id)
+			jobacct_storage_g_job_complete(acct_db_conn, job_ptr);
 	} else {
 		info("Recovered job %u", job_id);
 		job_ptr->assoc_ptr = (void *) assoc_ptr;
+
+		/* make sure we have started this job in accounting */
+		if(job_ptr->assoc_id && !job_ptr->db_index && job_ptr->nodes) {
+			debug("starting job %u in accounting", job_ptr->job_id);
+			jobacct_storage_g_job_start(
+				acct_db_conn, slurmctld_cluster_name, job_ptr);
+			if(job_ptr->job_state == JOB_SUSPENDED) 
+				jobacct_storage_g_job_suspend(acct_db_conn,
+							      job_ptr);
+		}
 	}
 
 	safe_unpack16(&step_flag, buffer);
 	while (step_flag == STEP_FLAG) {
+		/* No need to put these into accounting if they haven't
+		 * been already, since all the information will be put in
+		 * when the job is finished.
+		 */
 		if ((error_code = load_step_state(job_ptr, buffer)))
 			goto unpack_error;
 		safe_unpack16(&step_flag, buffer);
@@ -969,6 +998,43 @@ void _add_job_hash(struct job_record *job_ptr)
 	job_hash[inx] = job_ptr;
 }
 
+/*
+ * _acct_add_job_submit - Note that a job has been submitted
+ *      for accounting policy purposes.
+ */
+static void _acct_add_job_submit(struct job_record *job_ptr)
+{
+	acct_association_rec_t *assoc_ptr = NULL;
+
+	assoc_ptr = job_ptr->assoc_ptr;
+	while(assoc_ptr) {
+		assoc_ptr->used_submit_jobs++;	
+		/* now handle all the group limits of the parents */
+		assoc_ptr = assoc_ptr->parent_assoc_ptr;
+	}
+}
+
+/*
+ * _acct_remove_job_submit - Note that a job has finished (might
+ *      not have started or been allocated resources) for accounting
+ *      policy purposes.
+ */
+static void _acct_remove_job_submit(struct job_record *job_ptr)
+{
+	acct_association_rec_t *assoc_ptr = NULL;
+
+	assoc_ptr = job_ptr->assoc_ptr;
+	while(assoc_ptr) {
+		if (assoc_ptr->used_submit_jobs) 
+			assoc_ptr->used_submit_jobs--;
+		else
+			debug2("_acct_remove_job_submit: "
+			       "used_submit_jobs underflow for account %s",
+			       assoc_ptr->acct);
+		assoc_ptr = assoc_ptr->parent_assoc_ptr;
+	}
+}
+
 
 /* 
  * find_job_record - return a pointer to the job record with the given job_id
@@ -1153,7 +1219,8 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 						job_ptr->suspend_time;
 					job_ptr->tot_sus_time += 
 						difftime(now,
-							 job_ptr->suspend_time);
+							 job_ptr->
+							 suspend_time);
 				} else
 					job_ptr->end_time = now;
 				
@@ -1462,7 +1529,7 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 	time_t now = time(NULL);
 	
 	if (error_code) {
-		if (immediate && job_ptr) {
+		if (job_ptr && (immediate || will_run)) {
 			job_ptr->job_state = JOB_FAILED;
 			job_ptr->exit_code = 1;
 			job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
@@ -1531,7 +1598,10 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 		last_job_update = now;
 		slurm_sched_schedule();	/* work for external scheduler */
 	}
- 
+
+	if (accounting_enforce == ACCOUNTING_ENFORCE_WITH_LIMITS)
+		_acct_add_job_submit(job_ptr);
+
 	if ((error_code == ESLURM_NODES_BUSY) ||
 	    (error_code == ESLURM_JOB_HELD) ||
 	    (error_code == ESLURM_ACCOUNTING_POLICY) ||
@@ -1851,8 +1921,9 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 			job_ptr->job_state = JOB_TIMEOUT  | job_comp_flag;
 			job_ptr->exit_code = MAX(job_ptr->exit_code, 1);
 			job_ptr->state_reason = FAIL_TIMEOUT;
-		} else
+		} else 
 			job_ptr->job_state = JOB_COMPLETE | job_comp_flag;
+		
 		if (suspended) {
 			job_ptr->end_time = job_ptr->suspend_time;
 			job_ptr->tot_sus_time += 
@@ -1896,7 +1967,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	enum job_state_reason fail_reason;
 	struct part_record *part_ptr;
 	bitstr_t *req_bitmap = NULL, *exc_bitmap = NULL;
-	struct job_record *job_ptr;
+	struct job_record *job_ptr = NULL;
 	uint32_t total_nodes, max_procs;
 	acct_association_rec_t assoc_rec, *assoc_ptr;
 	List license_list = NULL;
@@ -1968,19 +2039,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		return error_code;
 	}
 
-	debug3("before alteration asking for nodes %u-%u procs %u", 
-	       job_desc->min_nodes, job_desc->max_nodes,
-	       job_desc->num_procs);
-	select_g_alter_node_cnt(SELECT_SET_NODE_CNT, job_desc);
-	select_g_get_jobinfo(job_desc->select_jobinfo,
-			     SELECT_DATA_MAX_PROCS, &max_procs);
-	debug3("after alteration asking for nodes %u-%u procs %u-%u", 
-	       job_desc->min_nodes, job_desc->max_nodes,
-	       job_desc->num_procs, max_procs);
-	
 	if ((error_code = _validate_job_desc(job_desc, allocate, submit_uid)))
 		return error_code;
- 
+
 	if ((job_desc->user_id == 0) && part_ptr->disable_root_jobs) {
 		error("Security violation, SUBMIT_JOB for user root disabled");
 		return ESLURM_USER_ID_MISSING;
@@ -2013,10 +2074,26 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		     job_desc->user_id, assoc_rec.acct, assoc_rec.partition);
 		error_code = ESLURM_INVALID_ACCOUNT;
 		return error_code;
+	} else if(association_based_accounting
+		  && !assoc_ptr && !accounting_enforce) {
+		/* if not enforcing associations we want to look for
+		   the default account and use it to avoid getting
+		   trash in the accounting records.
+		*/
+		assoc_rec.acct = NULL;
+		assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
+					accounting_enforce, &assoc_ptr);
+		if(assoc_ptr) {
+			info("_job_create: account '%s' has no association "
+			     "for user %u using default account '%s'",
+			     job_desc->account, job_desc->user_id,
+			     assoc_rec.acct);
+			xfree(job_desc->account);			
+		}
 	}
 	if (job_desc->account == NULL)
 		job_desc->account = xstrdup(assoc_rec.acct);
-	if (accounting_enforce &&
+	if ((accounting_enforce == ACCOUNTING_ENFORCE_WITH_LIMITS) &&
 	    (!_validate_acct_policy(job_desc, part_ptr, &assoc_rec))) {
 		info("_job_create: exceeded association's node or time limit "
 		     "for user %u", job_desc->user_id);
@@ -2024,6 +2101,19 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		return error_code;
 	}
 
+	/* This needs to be done after the association acct policy check since
+	 * it looks at unaltered nodes for bluegene systems
+	 */
+	debug3("before alteration asking for nodes %u-%u procs %u", 
+	       job_desc->min_nodes, job_desc->max_nodes,
+	       job_desc->num_procs);
+	select_g_alter_node_cnt(SELECT_SET_NODE_CNT, job_desc);
+	select_g_get_jobinfo(job_desc->select_jobinfo,
+			     SELECT_DATA_MAX_PROCS, &max_procs);
+	debug3("after alteration asking for nodes %u-%u procs %u-%u", 
+	       job_desc->min_nodes, job_desc->max_nodes,
+	       job_desc->num_procs, max_procs);
+	
 	/* check if select partition has sufficient resources to satisfy
 	 * the request */
 
@@ -2033,7 +2123,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 					      &req_bitmap);
 		if (error_code) {
 			error_code = ESLURM_INVALID_NODE_NAME;
-			goto cleanup;
+			goto cleanup_fail;
 		}
 		if (job_desc->contiguous)
 			bit_fill_gaps(req_bitmap);
@@ -2042,7 +2132,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 			     "partition %s", 
 			     job_desc->req_nodes, part_ptr->name);
 			error_code = ESLURM_REQUESTED_NODES_NOT_IN_PARTITION;
-			goto cleanup;
+			goto cleanup_fail;
 		}
 		
 		i = bit_set_count(req_bitmap);
@@ -2059,7 +2149,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 					      &exc_bitmap);
 		if (error_code) {
 			error_code = ESLURM_INVALID_NODE_NAME;
-			goto cleanup;
+			goto cleanup_fail;
 		}
 	}
 	if (exc_bitmap && req_bitmap) {
@@ -2074,7 +2164,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		if (first_set != -1) {
 			info("Job's required and excluded node lists overlap");
 			error_code = ESLURM_INVALID_NODE_NAME;
-			goto cleanup;
+			goto cleanup_fail;
 		}
 	}
 
@@ -2098,7 +2188,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 			info("MinNodes(%d) > GeometryNodes(%d)", 
 			     job_desc->min_nodes, tot);
 			error_code = ESLURM_TOO_MANY_REQUESTED_CPUS;
-			goto cleanup;
+			goto cleanup_fail;
 		}
 		job_desc->min_nodes = tot;
 	}
@@ -2133,7 +2223,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		     job_desc->num_procs, part_ptr->name, 
 		     part_ptr->total_cpus);
 		error_code = ESLURM_TOO_MANY_REQUESTED_CPUS;
-		goto cleanup;
+		goto cleanup_fail;
 	}
 	total_nodes = part_ptr->total_nodes;
 	select_g_alter_node_cnt(SELECT_APPLY_NODE_MIN_OFFSET,
@@ -2143,14 +2233,14 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		     job_desc->min_nodes, part_ptr->name, 
 		     part_ptr->total_nodes);
 		error_code = ESLURM_TOO_MANY_REQUESTED_NODES;
-		goto cleanup;
+		goto cleanup_fail;
 	}
 	if (job_desc->max_nodes && 
 	    (job_desc->max_nodes < job_desc->min_nodes)) {
 		info("Job's max_nodes(%u) < min_nodes(%u)",
 		     job_desc->max_nodes, job_desc->min_nodes);
 		error_code = ESLURM_TOO_MANY_REQUESTED_NODES;
-		goto cleanup;
+		goto cleanup_fail;
 	}
 
 	license_list = license_job_validate(job_desc->licenses, &valid);
@@ -2158,7 +2248,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		info("Job's requested licenses are invalid: %s", 
 		     job_desc->licenses);
 		error_code = ESLURM_INVALID_LICENSES;
-		goto cleanup;
+		goto cleanup_fail;
 	}
 
 	if ((error_code =_validate_job_create_req(job_desc)))
@@ -2170,7 +2260,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 						       &req_bitmap,
 						       &exc_bitmap))) {
 		error_code = ESLURM_ERROR_ON_DESC_TO_RECORD_COPY;
-		goto cleanup;
+		goto cleanup_fail;
 	}
 
 	job_ptr = *job_pptr;
@@ -2178,19 +2268,19 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	job_ptr->assoc_ptr = (void *) assoc_ptr;
 	if (update_job_dependency(job_ptr, job_desc->dependency)) {
 		error_code = ESLURM_DEPENDENCY;
-		goto cleanup;
+		goto cleanup_fail;
+	}
+	if (build_feature_list(job_ptr)) {
+		error_code = ESLURM_INVALID_FEATURE;
+		goto cleanup_fail;
 	}
 
 	if (job_desc->script
 	    &&  (!will_run)) {	/* don't bother with copy if just a test */
 		if ((error_code = _copy_job_desc_to_file(job_desc,
 							 job_ptr->job_id))) {
-			job_ptr->job_state = JOB_FAILED;
-			job_ptr->exit_code = 1;
-			job_ptr->state_reason = FAIL_SYSTEM;
-			job_ptr->start_time = job_ptr->end_time = time(NULL);
 			error_code = ESLURM_WRITING_TO_FILE;
-			goto cleanup;
+			goto cleanup_fail;
 		}
 		job_ptr->batch_flag = 1;
 	} else
@@ -2237,6 +2327,19 @@ cleanup:
 	FREE_NULL_BITMAP(req_bitmap);
 	FREE_NULL_BITMAP(exc_bitmap);
 	return error_code;
+
+cleanup_fail:
+	if (job_ptr) {
+		job_ptr->job_state = JOB_FAILED;
+		job_ptr->exit_code = 1;
+		job_ptr->state_reason = FAIL_SYSTEM;
+		job_ptr->start_time = job_ptr->end_time = time(NULL);
+	}
+	if (license_list)
+		list_destroy(license_list);
+	FREE_NULL_BITMAP(req_bitmap);
+	FREE_NULL_BITMAP(exc_bitmap);
+	return error_code;
 }
 
 /* Perform some size checks on strings we store to prevent
@@ -4119,16 +4222,41 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
 			error_code = ESLURM_DISABLED;
 		else if (super_user) {
-			xfree(detail_ptr->features);
 			if (job_specs->features[0] != '\0') {
+				char *old_features = detail_ptr->features;
+				List old_list = detail_ptr->feature_list;
 				detail_ptr->features = job_specs->features;
-				job_specs->features = NULL;
-				info("update_job: setting features to %s for "
-				     "job_id %u", job_specs->features, 
-				     job_specs->job_id);
+				detail_ptr->feature_list = NULL;
+				if (build_feature_list(job_ptr)) {
+					info("update_job: invalid features"
+				 	     "(%s) for job_id %u", 
+					     job_specs->features, 
+				  	     job_specs->job_id);
+					if (detail_ptr->feature_list) {
+						list_destroy(detail_ptr->
+							     feature_list);
+					}
+					detail_ptr->features = old_features;
+					detail_ptr->feature_list = old_list;
+					error_code = ESLURM_INVALID_FEATURE;
+				} else {
+					info("update_job: setting features to "
+				 	     "%s for job_id %u", 
+					     job_specs->features, 
+				  	     job_specs->job_id);
+					xfree(old_features);
+					if (old_list)
+						list_destroy(old_list);
+					job_specs->features = NULL;
+				}
 			} else {
 				info("update_job: cleared features for job %u",
 				     job_specs->job_id);
+				xfree(detail_ptr->features);
+				if (detail_ptr->feature_list) {
+					list_destroy(detail_ptr->feature_list);
+					detail_ptr->feature_list = NULL;
+				}
 			}
 		} else {
 			error("Attempt to change features for job %u",
@@ -4986,6 +5114,9 @@ extern void job_completion_logger(struct job_record  *job_ptr)
 	int base_state;
 	xassert(job_ptr);
 
+	if (accounting_enforce == ACCOUNTING_ENFORCE_WITH_LIMITS)
+		_acct_remove_job_submit(job_ptr);
+
 	/* make sure all parts of the job are notified */
 	srun_job_complete(job_ptr);
 	
@@ -5002,6 +5133,17 @@ extern void job_completion_logger(struct job_record  *job_ptr)
 	}
 
 	g_slurm_jobcomp_write(job_ptr);
+
+	/* 
+	 * This means the job wasn't ever eligible, but we want to
+	 * keep track of all jobs, so we will set the db_inx to
+	 * INFINITE and the database will understand what happened.
+	 */ 
+	if(!job_ptr->nodes && !job_ptr->db_index) {
+		jobacct_storage_g_job_start(
+			acct_db_conn, slurmctld_cluster_name, job_ptr);
+	}
+
 	jobacct_storage_g_job_complete(acct_db_conn, job_ptr);
 }
 
@@ -5039,7 +5181,8 @@ extern bool job_independent(struct job_record *job_ptr)
 			 * order to calculate reserved time (a measure of
 			 * system over-subscription), job really is not
 			 * starting now */
-			jobacct_storage_g_job_start(acct_db_conn, job_ptr);
+			jobacct_storage_g_job_start(
+				acct_db_conn, slurmctld_cluster_name, job_ptr);
 		}
 		return true;
 	} else if (rc == 1) {
@@ -5537,51 +5680,189 @@ extern void update_job_nodes_completing(void)
 
 static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 				  struct part_record *part_ptr,
-				  acct_association_rec_t *assoc_ptr)
+				  acct_association_rec_t *assoc_in)
 {
 	uint32_t time_limit;
+	acct_association_rec_t *assoc_ptr = assoc_in;
+	int parent = 0;
+	int timelimit_set = 0;
+	int max_nodes_set = 0;
+	char *user_name = assoc_ptr->user;
+
+	while(assoc_ptr) {
+		/* for validation we don't need to look at 
+		 * assoc_ptr->grp_cpu_mins.
+		 */
 
-	//log_assoc_rec(assoc_ptr);
-	if ((assoc_ptr->max_wall_duration_per_job != NO_VAL) &&
-	    (assoc_ptr->max_wall_duration_per_job != INFINITE)) {
-		time_limit = assoc_ptr->max_wall_duration_per_job;
-		if (job_desc->time_limit == NO_VAL) {
-			if (part_ptr->max_time == INFINITE)
-				job_desc->time_limit = time_limit;
-			else
-				job_desc->time_limit = MIN(time_limit, 
-							   part_ptr->max_time);
-		} else if (job_desc->time_limit > time_limit) {
-			info("job for user %u: "
-			     "time limit %u exceeds account max %u",
+		/* NOTE: We can't enforce assoc_ptr->grp_cpus at this
+		 * time because we don't have access to a CPU count for the job
+		 * due to how all of the job's specifications interact */
+
+		/* for validation we don't need to look at 
+		 * assoc_ptr->grp_jobs.
+		 */
+
+		if ((assoc_ptr->grp_nodes != NO_VAL) &&
+		    (assoc_ptr->grp_nodes != INFINITE)) {
+			if (job_desc->min_nodes > assoc_ptr->grp_nodes) {
+				info("job submit for user %s(%u): "
+				     "min node request %u exceeds "
+				     "group max node limit %u for account %s",
+				     user_name,
+				     job_desc->user_id, 
+				     job_desc->min_nodes, 
+				     assoc_ptr->grp_nodes,
+				     assoc_ptr->acct);
+				return false;
+			} else if (job_desc->max_nodes == 0
+				   || (max_nodes_set 
+				       && (job_desc->max_nodes 
+					   > assoc_ptr->grp_nodes))) {
+				job_desc->max_nodes = assoc_ptr->grp_nodes;
+				max_nodes_set = 1;
+			} else if (job_desc->max_nodes > 
+				   assoc_ptr->grp_nodes) {
+				info("job submit for user %s(%u): "
+				     "max node changed %u -> %u because "
+				     "of account limit",
+				     user_name,
+				     job_desc->user_id, 
+				     job_desc->max_nodes, 
+				     assoc_ptr->grp_nodes);
+				job_desc->max_nodes = assoc_ptr->grp_nodes;
+			}
+		}
+
+		if ((assoc_ptr->grp_submit_jobs != NO_VAL) &&
+		    (assoc_ptr->grp_submit_jobs != INFINITE) &&
+		    (assoc_ptr->used_submit_jobs 
+		     >= assoc_ptr->grp_submit_jobs)) {
+			info("job submit for user %s(%u): "
+			     "group max submit job limit exceded %u "
+			     "for account %s",
+			     user_name,
 			     job_desc->user_id, 
-			     job_desc->time_limit, time_limit);
+			     assoc_ptr->grp_submit_jobs,
+			     assoc_ptr->acct);
 			return false;
 		}
-	}
 
-	if ((assoc_ptr->max_nodes_per_job != NO_VAL) &&
-	    (assoc_ptr->max_nodes_per_job != INFINITE)) {
-		if (job_desc->max_nodes == 0)
-			job_desc->max_nodes = assoc_ptr->max_nodes_per_job;
-		else if (job_desc->max_nodes > assoc_ptr->max_nodes_per_job) {
-			if (job_desc->min_nodes > 
-			    assoc_ptr->max_nodes_per_job) {
-				info("job %u for user %u: "
-				     "node limit %u exceeds account max %u",
-				     job_desc->job_id, job_desc->user_id, 
+		if ((assoc_ptr->grp_wall != NO_VAL) &&
+		    (assoc_ptr->grp_wall != INFINITE)) {
+			time_limit = assoc_ptr->grp_wall;
+			if (job_desc->time_limit == NO_VAL) {
+				if (part_ptr->max_time == INFINITE)
+					job_desc->time_limit = time_limit;
+				else 
+					job_desc->time_limit =
+						MIN(time_limit, 
+						    part_ptr->max_time);
+				timelimit_set = 1;
+			} else if (timelimit_set && 
+				   job_desc->time_limit > time_limit) {
+				job_desc->time_limit = time_limit;
+			} else if (job_desc->time_limit > time_limit) {
+				info("job submit for user %s(%u): "
+				     "time limit %u exceeds group "
+				     "time limit %u for account %s",
+				     user_name,
+				     job_desc->user_id, 
+				     job_desc->time_limit, time_limit,
+				     assoc_ptr->acct);
+				return false;
+			}
+		}
+		
+		/* We don't need to look at the regular limits for
+		 * parents since we have pre-propagated them, so just
+		 * continue with the next parent
+		 */
+		if(parent) {
+			assoc_ptr = assoc_ptr->parent_assoc_ptr;
+			continue;
+		} 
+		
+		/* for validation we don't need to look at 
+		 * assoc_ptr->max_cpu_mins_pj.
+		 */
+		
+		/* NOTE: We can't enforce assoc_ptr->max_cpus at this
+		 * time because we don't have access to a CPU count for the job
+		 * due to how all of the job's specifications interact */
+
+		/* for validation we don't need to look at 
+		 * assoc_ptr->max_jobs.
+		 */
+		
+		if ((assoc_ptr->max_nodes_pj != NO_VAL) &&
+		    (assoc_ptr->max_nodes_pj != INFINITE)) {
+			if (job_desc->min_nodes > assoc_ptr->max_nodes_pj) {
+				info("job submit for user %s(%u): "
+				     "min node limit %u exceeds "
+				     "account max %u",
+				     user_name,
+				     job_desc->user_id, 
 				     job_desc->min_nodes, 
-				     assoc_ptr->max_nodes_per_job);
+				     assoc_ptr->max_nodes_pj);
 				return false;
+			} else if (job_desc->max_nodes == 0
+				   || (max_nodes_set 
+				       && (job_desc->max_nodes 
+					   > assoc_ptr->max_nodes_pj))) {
+				job_desc->max_nodes = assoc_ptr->max_nodes_pj;
+				max_nodes_set = 1;
+			} else if (job_desc->max_nodes > 
+				   assoc_ptr->max_nodes_pj) {
+				info("job submit for user %s(%u): "
+				     "max node changed %u -> %u because "
+				     "of account limit",
+				     user_name,
+				     job_desc->user_id, 
+				     job_desc->max_nodes, 
+				     assoc_ptr->max_nodes_pj);
+				job_desc->max_nodes = assoc_ptr->max_nodes_pj;
 			}
-			job_desc->max_nodes = assoc_ptr->max_nodes_per_job;
 		}
+		
+		if ((assoc_ptr->max_submit_jobs != NO_VAL) &&
+		    (assoc_ptr->max_submit_jobs != INFINITE) &&
+		    (assoc_ptr->used_submit_jobs 
+		     >= assoc_ptr->max_submit_jobs)) {
+			info("job submit for user %s(%u): "
+			     "account max submit job limit exceded %u",
+			     user_name,
+			     job_desc->user_id, 
+			     assoc_ptr->max_submit_jobs);
+			return false;
+		}
+		
+		if ((assoc_ptr->max_wall_pj != NO_VAL) &&
+		    (assoc_ptr->max_wall_pj != INFINITE)) {
+			time_limit = assoc_ptr->max_wall_pj;
+			if (job_desc->time_limit == NO_VAL) {
+				if (part_ptr->max_time == INFINITE)
+					job_desc->time_limit = time_limit;
+				else 
+					job_desc->time_limit =
+						MIN(time_limit, 
+						    part_ptr->max_time);
+				timelimit_set = 1;
+			} else if (timelimit_set && 
+				   job_desc->time_limit > time_limit) {
+				job_desc->time_limit = time_limit;
+			} else if (job_desc->time_limit > time_limit) {
+				info("job submit for user %s(%u): "
+				     "time limit %u exceeds account max %u",
+				     user_name,
+				     job_desc->user_id, 
+				     job_desc->time_limit, time_limit);
+				return false;
+			}
+		}
+		
+		assoc_ptr = assoc_ptr->parent_assoc_ptr;
+		parent = 1;
 	}
-
-	/* NOTE: We can't enforce assoc_ptr->max_cpu_secs_per_job at this
-	 * time because we don't have access to a CPU count for the job
-	 * due to how all of the job's specifications interact */
-
 	return true;
 }
 
@@ -5643,9 +5924,17 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 		info("%s: invalid account %s for job_id %u",
 		     module, new_account, job_ptr->job_id);
 		return ESLURM_INVALID_ACCOUNT;
+	} else if(association_based_accounting 
+		  && !assoc_ptr && !accounting_enforce) {
+		/* if not enforcing associations we want to look for
+		   the default account and use it to avoid getting
+		   trash in the accounting records.
+		*/
+		assoc_rec.acct = NULL;
+		assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
+					accounting_enforce, &assoc_ptr);
 	}
 
-
 	xfree(job_ptr->account);
 	if (assoc_rec.acct[0] != '\0') {
 		job_ptr->account = xstrdup(assoc_rec.acct);
@@ -5660,9 +5949,33 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 
 	if (job_ptr->details && job_ptr->details->begin_time) {
 		/* Update account associated with the eligible time */
-		jobacct_storage_g_job_start(acct_db_conn, job_ptr);
+		jobacct_storage_g_job_start(
+			acct_db_conn, slurmctld_cluster_name, job_ptr);
 	}
 	last_job_update = time(NULL);
 
 	return SLURM_SUCCESS;
 }
+
+extern int send_jobs_to_accounting(time_t event_time)
+{
+	ListIterator itr = NULL;
+	struct job_record *job_ptr;
+	/* send jobs in pending or running state */
+	itr = list_iterator_create(job_list);
+	while ((job_ptr = list_next(itr))) {
+		/* we only want active, unaccounted-for jobs */
+		if(job_ptr->db_index && job_ptr->job_state > JOB_SUSPENDED) 
+			continue;
+		debug("first reg: starting job %u in accounting",
+		      job_ptr->job_id);
+		jobacct_storage_g_job_start(
+			acct_db_conn, slurmctld_cluster_name, job_ptr);
+
+		if(job_ptr->job_state == JOB_SUSPENDED) 
+			jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
+	}
+	list_iterator_destroy(itr);
+
+	return SLURM_SUCCESS;
+}
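In the reworked _validate_acct_policy() above, a request with no explicit time limit inherits MIN(association limit, partition max), an over-large max_nodes is clamped down to the limit (with a log message), and only a request that can never fit, such as min_nodes above the limit or an explicit over-long time limit, is rejected outright. A rough illustration of that clamping logic with a simplified job/limit structure (field names are placeholders):

#include <stdio.h>

#define NO_VAL 0xffffffff

struct limits {
	unsigned int max_wall;		/* per-job wall limit, NO_VAL = none */
	unsigned int max_nodes;		/* per-job node limit, NO_VAL = none */
};

struct request {
	unsigned int time_limit;	/* NO_VAL = unset */
	unsigned int min_nodes;
	unsigned int max_nodes;		/* 0 = unset */
};

/* return 0 if the request violates a hard limit,
 * 1 if it was accepted (possibly after clamping) */
static int apply_limits(struct request *req, const struct limits *lim,
			unsigned int part_max_time)
{
	if (lim->max_wall != NO_VAL) {
		if (req->time_limit == NO_VAL)
			req->time_limit = (part_max_time < lim->max_wall) ?
				part_max_time : lim->max_wall;
		else if (req->time_limit > lim->max_wall)
			return 0;	/* explicit request exceeds the limit */
	}
	if (lim->max_nodes != NO_VAL) {
		if (req->min_nodes > lim->max_nodes)
			return 0;	/* can never fit */
		if ((req->max_nodes == 0) || (req->max_nodes > lim->max_nodes))
			req->max_nodes = lim->max_nodes;
	}
	return 1;
}

int main(void)
{
	struct limits  lim = { 60, 16 };
	struct request req = { NO_VAL, 4, 0 };

	if (apply_limits(&req, &lim, 120))
		printf("accepted: time_limit=%u max_nodes=%u\n",
		       req.time_limit, req.max_nodes);
	else
		printf("rejected\n");
	return 0;
}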
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 20863ba58..6e037d285 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -3,6 +3,7 @@
  *	Note there is a global job list (job_list)
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  LLNL-CODE-402394.
@@ -67,8 +68,58 @@
 #define MAX_RETRIES 10
 
 static void _depend_list_del(void *dep_ptr);
+static void _feature_list_delete(void *x);
+static int  _valid_feature_list(uint32_t job_id, List feature_list);
+static int  _valid_node_feature(char *feature);
 static char **_xduparray(uint16_t size, char ** array);
 
+
+/*
+ * _build_user_job_list - build list of jobs for a given user
+ *			  and an optional job name
+ * IN  user_id - user id
+ * IN  job_name - job name constraint
+ * OUT job_queue - pointer to job queue
+ * RET number of entries in job_queue
+ * NOTE: the buffer at *job_queue must be xfreed by the caller
+ */
+static int _build_user_job_list(uint32_t user_id,char* job_name,
+			        struct job_queue **job_queue)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr = NULL;
+	int job_buffer_size, job_queue_size;
+	struct job_queue *my_job_queue;
+
+	/* build list of pending jobs */
+	job_buffer_size = job_queue_size = 0;
+	job_queue[0] = my_job_queue = NULL;
+ 
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		xassert (job_ptr->magic == JOB_MAGIC);
+		if (job_ptr->user_id != user_id)
+			continue;
+		if (job_name && strcmp(job_name,job_ptr->name))
+			continue;
+		if (job_buffer_size <= job_queue_size) {
+			job_buffer_size += 200;
+			xrealloc(my_job_queue, job_buffer_size *
+				 sizeof(struct job_queue));
+		}
+		my_job_queue[job_queue_size].job_ptr = job_ptr;
+		my_job_queue[job_queue_size].job_priority = job_ptr->priority;
+		my_job_queue[job_queue_size].part_priority = 
+				job_ptr->part_ptr->priority;
+		job_queue_size++;
+	}
+	list_iterator_destroy(job_iterator);
+ 
+	job_queue[0] = my_job_queue;
+	return job_queue_size;
+}
+
+
 /* 
  * build_job_queue - build (non-priority ordered) list of pending jobs
  * OUT job_queue - pointer to job queue
@@ -195,7 +246,6 @@ static bool _failed_partition(struct part_record *part_ptr,
 	return false;
 }
 
-#ifndef HAVE_BG
 /* Add a partition to the failed_parts array, reserving its nodes
  * from use by lower priority jobs. Also flags all partitions with
  * nodes overlapping this partition. */
@@ -223,7 +273,6 @@ static void _add_failed_partition(struct part_record *failed_part_ptr,
 
 	*failed_part_cnt = count;
 }
-#endif
 
 /* 
  * schedule - attempt to schedule all pending jobs
@@ -248,21 +297,25 @@ extern int schedule(void)
 	char *ionodes = NULL;
 	char tmp_char[256];
 #endif
+	static bool backfill_sched = false;
+	static bool sched_test = false;
 	static bool wiki_sched = false;
-	static bool wiki_sched_test = false;
 	time_t now = time(NULL);
 
 	DEF_TIMERS;
 
 	START_TIMER;
-	/* don't bother trying to avoid fragmentation with sched/wiki */
-	if (!wiki_sched_test) {
+	if (!sched_test) {
 		char *sched_type = slurm_get_sched_type();
+		/* On BlueGene, do FIFO only with sched/backfill */
+		if (strcmp(sched_type, "sched/backfill") == 0)
+			backfill_sched = true;
+		/* Disable avoiding of fragmentation with sched/wiki */
 		if ((strcmp(sched_type, "sched/wiki") == 0)
 		||  (strcmp(sched_type, "sched/wiki2") == 0))
 			wiki_sched = true;
 		xfree(sched_type);
-		wiki_sched_test = true;
+		sched_test = true;
 	}
 
 	lock_slurmctld(job_write_lock);
@@ -320,23 +373,24 @@ extern int schedule(void)
 
 		error_code = select_nodes(job_ptr, false, NULL);
 		if (error_code == ESLURM_NODES_BUSY) {
-#ifndef HAVE_BG 	/* keep trying to schedule jobs in partition */
-			/* While we use static partitiioning on Blue Gene, 
-			 * each job can be scheduled independently without 
-			 * impacting other jobs with different characteristics
-			 * (e.g. node-use [virtual or coprocessor] or conn-type
-			 * [mesh, torus, or nav]). Because of this we sort and 
-			 * then try to schedule every pending job. This does 
-			 * increase the overhead of this job scheduling cycle, 
-			 * but the only way to effectively avoid this is to 
-			 * define each SLURM partition as containing a 
-			 * single Blue Gene job partition type (e.g. 
-			 * group all Blue Gene job partitions of type 
-			 * 2x2x2 coprocessor mesh into a single SLURM
-			 * partition, say "co-mesh-222") */
-			_add_failed_partition(job_ptr->part_ptr, failed_parts,
-				      &failed_part_cnt);
+			bool fail_by_part = true;
+#ifdef HAVE_BG
+			/* When we use static or overlap partitioning on
+			 * BlueGene, each job can possibly be scheduled
+			 * independently, without impacting other jobs of
+			 * different sizes. Therefore we sort and try to
+			 * schedule every pending job unless the backfill
+			 * scheduler is configured. */
+			if (!backfill_sched)
+				fail_by_part = false;
 #endif
+			if (fail_by_part) {
+		 		/* do not schedule more jobs 
+				 * in this partition */
+				_add_failed_partition(job_ptr->part_ptr, 
+						      failed_parts, 
+						      &failed_part_cnt);
+			}
 		} else if (error_code == SLURM_SUCCESS) {	
 			/* job initiated */
 			last_job_update = now;
@@ -538,7 +592,11 @@ extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr,
 	cred_arg.jobid     = launch_msg_ptr->job_id;
 	cred_arg.stepid    = launch_msg_ptr->step_id;
 	cred_arg.uid       = launch_msg_ptr->uid;
+#ifdef HAVE_FRONT_END
+	cred_arg.hostlist  = node_record_table_ptr[0].name;
+#else
 	cred_arg.hostlist  = launch_msg_ptr->nodes;
+#endif
 	if (job_ptr->details == NULL)
 		cred_arg.job_mem = 0;
 	else if (job_ptr->details->job_min_memory & MEM_PER_CPU) {
@@ -581,7 +639,11 @@ extern void print_job_dependency(struct job_record *job_ptr)
 	if (!depend_iter)
 		fatal("list_iterator_create memory allocation failure");
 	while ((dep_ptr = list_next(depend_iter))) {
-		if      (dep_ptr->depend_type == SLURM_DEPEND_AFTER)
+		if      (dep_ptr->depend_type == SLURM_DEPEND_SINGLETON) {
+			info("  singleton");
+			continue;
+		}
+		else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER)
 			dep_str = "after";
 		else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_ANY)
 			dep_str = "afterany";
@@ -607,6 +669,9 @@ extern int test_job_dependency(struct job_record *job_ptr)
 	ListIterator depend_iter;
 	struct depend_spec *dep_ptr;
 	bool failure = false;
+ 	struct job_queue *job_queue = NULL;
+ 	int i, now, job_queue_size = 0;
+ 	struct job_record *qjob_ptr;
 
 	if ((job_ptr->details == NULL) ||
 	    (job_ptr->details->depend_list == NULL))
@@ -616,7 +681,33 @@ extern int test_job_dependency(struct job_record *job_ptr)
 	if (!depend_iter)
 		fatal("list_iterator_create memory allocation failure");
 	while ((dep_ptr = list_next(depend_iter))) {
-		if (dep_ptr->job_ptr->job_id != dep_ptr->job_id) {
+ 	        if ((dep_ptr->depend_type == SLURM_DEPEND_SINGLETON) &&
+ 		    job_ptr->name) {
+ 		        /* get user jobs with the same user and name */
+ 			job_queue_size = _build_user_job_list(job_ptr->user_id,
+							      job_ptr->name,
+							      &job_queue);
+ 			now = 1;
+ 			for (i=0; i<job_queue_size; i++) {
+				qjob_ptr = job_queue[i].job_ptr;
+				/* already running/suspended job or previously
+				 * submitted pending job */
+				if ((qjob_ptr->job_state == JOB_RUNNING) ||
+				    (qjob_ptr->job_state == JOB_SUSPENDED) ||
+				    ((qjob_ptr->job_state == JOB_PENDING) &&
+				     (qjob_ptr->job_id < job_ptr->job_id))) {
+					now = 0;
+					break;
+ 				}
+ 			}
+ 			if (job_queue_size > 0)
+				xfree(job_queue);
+			/* job can run now, delete dependency */
+ 			if (now)
+ 				list_delete_item(depend_iter);
+ 			else
+				break;
+ 		} else if (dep_ptr->job_ptr->job_id != dep_ptr->job_id) {
 			/* job is gone, dependency lifted */
 			list_delete_item(depend_iter);
 		} else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER) {
@@ -695,6 +786,26 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 	new_depend_list = list_create(_depend_list_del);
 	/* validate new dependency string */
 	while (rc == SLURM_SUCCESS) {
+
+ 	        /* test singleton dependency flag */
+ 	        if ( strncasecmp(tok, "singleton", 9) == 0 ) {
+			depend_type = SLURM_DEPEND_SINGLETON;
+			dep_ptr = xmalloc(sizeof(struct depend_spec));
+			dep_ptr->depend_type = depend_type;
+			/* dep_ptr->job_id = 0;		set by xmalloc */
+			/* dep_ptr->job_ptr = NULL;	set by xmalloc */
+			if (!list_append(new_depend_list, dep_ptr)) {
+				fatal("list_append memory allocation "
+				      "failure for singleton");
+			}
+			if ( *(tok + 9 ) == ',' ) {
+				tok+=10;
+				continue;
+			}
+			else
+				break;
+ 		}
+
 		sep_ptr = strchr(tok, ':');
 		if ((sep_ptr == NULL) && (job_id == 0)) {
 			job_id = strtol(tok, &sep_ptr, 10);
@@ -707,7 +818,8 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 			dep_job_ptr = find_job_record(job_id);
 			if (!dep_job_ptr)	/* assume already done */
 				break;
-			snprintf(dep_buf, sizeof(dep_buf), "afterany:%u", job_id);
+			snprintf(dep_buf, sizeof(dep_buf), 
+				 "afterany:%u", job_id);
 			new_depend = dep_buf;
 			dep_ptr = xmalloc(sizeof(struct depend_spec));
 			dep_ptr->depend_type = SLURM_DEPEND_AFTER_ANY;
@@ -880,3 +992,193 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 	}
 
 }
+
+/*
+ * build_feature_list - Translate a job's feature string into a feature_list
+ * IN  details->features
+ * OUT details->feature_list
+ * RET error code
+ */
+extern int build_feature_list(struct job_record *job_ptr)
+{
+	struct job_details *detail_ptr = job_ptr->details;
+	char *tmp_requested, *str_ptr1, *str_ptr2, *feature = NULL;
+	int bracket = 0, count = 0, i;
+	bool have_count = false, have_or = false;
+	struct feature_record *feat;
+
+	if (detail_ptr->features == NULL)	/* no constraints */
+		return SLURM_SUCCESS;
+	if (detail_ptr->feature_list)		/* already processed */
+		return SLURM_SUCCESS;
+
+	tmp_requested = xstrdup(detail_ptr->features);
+	str_ptr1 = tmp_requested;
+	detail_ptr->feature_list = list_create(_feature_list_delete);
+	for (i=0; ; i++) {
+		if (tmp_requested[i] == '*') {
+			tmp_requested[i] = '\0';
+			have_count = true;
+			count = strtol(&tmp_requested[i+1], &str_ptr2, 10);
+			if ((feature == NULL) || (count <= 0)) {
+				info("Job %u invalid constraint %s", 
+					job_ptr->job_id, detail_ptr->features);
+				xfree(tmp_requested);
+				return ESLURM_INVALID_FEATURE;
+			}
+			i = str_ptr2 - tmp_requested - 1;
+		} else if (tmp_requested[i] == '&') {
+			tmp_requested[i] = '\0';
+			if ((feature == NULL) || (bracket != 0)) {
+				info("Job %u invalid constraint %s", 
+					job_ptr->job_id, detail_ptr->features);
+				xfree(tmp_requested);
+				return ESLURM_INVALID_FEATURE;
+			}
+			feat = xmalloc(sizeof(struct feature_record));
+			feat->name = xstrdup(feature);
+			feat->count = count;
+			feat->op_code = FEATURE_OP_AND;
+			list_append(detail_ptr->feature_list, feat);
+			feature = NULL;
+			count = 0;
+		} else if (tmp_requested[i] == '|') {
+			tmp_requested[i] = '\0';
+			have_or = true;
+			if (feature == NULL) {
+				info("Job %u invalid constraint %s", 
+					job_ptr->job_id, detail_ptr->features);
+				xfree(tmp_requested);
+				return ESLURM_INVALID_FEATURE;
+			}
+			feat = xmalloc(sizeof(struct feature_record));
+			feat->name = xstrdup(feature);
+			feat->count = count;
+			if (bracket)
+				feat->op_code = FEATURE_OP_XOR;
+			else
+				feat->op_code = FEATURE_OP_OR;
+			list_append(detail_ptr->feature_list, feat);
+			feature = NULL;
+			count = 0;
+		} else if (tmp_requested[i] == '[') {
+			tmp_requested[i] = '\0';
+			if ((feature != NULL) || bracket) {
+				info("Job %u invalid constraint %s", 
+					job_ptr->job_id, detail_ptr->features);
+				xfree(tmp_requested);
+				return ESLURM_INVALID_FEATURE;
+			}
+			bracket++;
+		} else if (tmp_requested[i] == ']') {
+			tmp_requested[i] = '\0';
+			if ((feature == NULL) || (bracket == 0)) {
+				info("Job %u invalid constraint %s", 
+					job_ptr->job_id, detail_ptr->features);
+				xfree(tmp_requested);
+				return ESLURM_INVALID_FEATURE;
+			}
+			bracket = 0;
+		} else if (tmp_requested[i] == '\0') {
+			if (feature) {
+				feat = xmalloc(sizeof(struct feature_record));
+				feat->name = xstrdup(feature);
+				feat->count = count;
+				feat->op_code = FEATURE_OP_END;
+				list_append(detail_ptr->feature_list, feat);
+			}
+			break;
+		} else if (feature == NULL) {
+			feature = &tmp_requested[i];
+		}
+	}
+	xfree(tmp_requested);
+	if (have_count && have_or) {
+		info("Job %u invalid constraint (OR with feature count): %s", 
+			job_ptr->job_id, detail_ptr->features);
+		return ESLURM_INVALID_FEATURE;
+	}
+
+	return _valid_feature_list(job_ptr->job_id, detail_ptr->feature_list);
+}
+
+static void _feature_list_delete(void *x)
+{
+	struct feature_record *feature = (struct feature_record *)x;
+	xfree(feature->name);
+	xfree(feature);
+}
+
+static int _valid_feature_list(uint32_t job_id, List feature_list)
+{
+	ListIterator feat_iter;
+	struct feature_record *feat_ptr;
+	char *buf = NULL, tmp[16];
+	int bracket = 0;
+	int rc = SLURM_SUCCESS;
+
+	if (feature_list == NULL) {
+		debug2("Job %u feature list is empty", job_id);
+		return rc;
+	}
+
+	feat_iter = list_iterator_create(feature_list);
+	while((feat_ptr = (struct feature_record *)list_next(feat_iter))) {
+		if (feat_ptr->op_code == FEATURE_OP_XOR) {
+			if (bracket == 0)
+				xstrcat(buf, "[");
+			bracket = 1;
+		}
+		xstrcat(buf, feat_ptr->name);
+		if (rc == SLURM_SUCCESS)
+			rc = _valid_node_feature(feat_ptr->name);
+		if (feat_ptr->count) {
+			snprintf(tmp, sizeof(tmp), "*%u", feat_ptr->count);
+			xstrcat(buf, tmp);
+		}
+		if (bracket && (feat_ptr->op_code != FEATURE_OP_XOR)) {
+			xstrcat(buf, "]");
+			bracket = 0;
+		}
+		if (feat_ptr->op_code == FEATURE_OP_AND)
+			xstrcat(buf, "&");
+		else if ((feat_ptr->op_code == FEATURE_OP_OR) ||
+			 (feat_ptr->op_code == FEATURE_OP_XOR))
+			xstrcat(buf, "|");
+	}
+	list_iterator_destroy(feat_iter);
+	if (rc == SLURM_SUCCESS)
+		debug("Job %u feature list: %s", job_id, buf);
+	else
+		info("Job %u has invalid feature list: %s", job_id, buf);
+	xfree(buf);
+	return rc;
+}
+
+static int _valid_node_feature(char *feature)
+{
+	int i, rc = ESLURM_INVALID_FEATURE;
+	ListIterator config_iterator;
+	struct config_record *config_ptr;
+
+	config_iterator = list_iterator_create(config_list);
+	if (config_iterator == NULL)
+		fatal("list_iterator_create malloc failure");
+	while ((config_ptr = (struct config_record *) 
+			list_next(config_iterator))) {
+		if (config_ptr->feature_array == NULL)
+			continue;
+		for (i=0; ; i++) {
+			if (config_ptr->feature_array[i] == NULL)
+				break;
+			if (strcmp(feature, config_ptr->feature_array[i]))
+				continue;
+			rc = SLURM_SUCCESS;
+			break;
+		}
+		if (rc == SLURM_SUCCESS)
+			break;
+	}
+	list_iterator_destroy(config_iterator);
+	return rc;
+}
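build_feature_list() above walks the constraint string character by character: '&' separates required features, '|' separates alternatives, '[...]' turns the alternatives inside into an exclusive-OR group, and '*<count>' attaches a node count to the preceding feature. A compact stand-alone tokenizer in the same spirit (the operator codes and output format are simplified stand-ins, not the real feature_record handling):

#include <stdio.h>
#include <stdlib.h>

enum feat_op { OP_AND, OP_OR, OP_XOR, OP_END };

/* walk a constraint string in place, printing each feature name, its
 * optional "*count" suffix and the operator that follows it */
static void parse_constraint(char *spec)
{
	char *name = NULL;
	char *p;
	int bracket = 0, count = 0;

	for (p = spec; ; p++) {
		char c = *p;
		if (c == '*') {
			*p = '\0';
			count = (int) strtol(p + 1, &p, 10);
			p--;		/* the loop increment re-advances p */
		} else if ((c == '&') || (c == '|') || (c == '\0')) {
			enum feat_op op = (c == '&') ? OP_AND :
					  (c == '|') ? (bracket ? OP_XOR : OP_OR) :
					  OP_END;
			*p = '\0';
			if (name && *name)
				printf("feature=%s count=%d op=%d\n",
				       name, count, op);
			name = NULL;
			count = 0;
			if (c == '\0')
				break;
		} else if (c == '[') {
			*p = '\0';
			bracket = 1;
		} else if (c == ']') {
			*p = '\0';
			bracket = 0;
		} else if (name == NULL) {
			name = p;
		}
	}
}

int main(void)
{
	char spec[] = "intel*4&[rack1|rack2]";
	parse_constraint(spec);
	return 0;
}

For the sample string this prints intel with a count of 4 followed by the two bracketed alternatives, roughly the shape the real parser stores in detail_ptr->feature_list before _valid_feature_list() checks each name against the node configuration.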
diff --git a/src/slurmctld/job_scheduler.h b/src/slurmctld/job_scheduler.h
index f09cb7e72..17b39ec51 100644
--- a/src/slurmctld/job_scheduler.h
+++ b/src/slurmctld/job_scheduler.h
@@ -2,7 +2,8 @@
  *  job_scheduler.h - data structures and function definitions for scheduling
  *	of pending jobs in priority order
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
  *  Derived from dsh written by Jim Garlick <garlick1@llnl.gov>
@@ -48,6 +49,14 @@ struct job_queue {
 	uint16_t part_priority;
 };
 
+/*
+ * build_feature_list - Translate a job's feature string into a feature_list
+ * IN  details->features
+ * OUT details->feature_list
+ * RET error code
+ */
+extern int build_feature_list(struct job_record *job_ptr);
+
 /* 
  * build_job_queue - build (non-priority ordered) list of pending jobs
  * OUT job_queue - pointer to job queue
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index e8b51b15b..fce0508e5 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -4,7 +4,7 @@
  *	hash table (node_hash_table), time stamp (last_node_update) and 
  *	configuration list (config_list)
  *
- *  $Id: node_mgr.c 14872 2008-08-25 16:25:28Z jette $
+ *  $Id: node_mgr.c 15324 2008-10-07 00:16:53Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -698,6 +698,7 @@ static void _list_delete_config (void *config_entry)
 	xassert(config_ptr);
 	xassert(config_ptr->magic == CONFIG_MAGIC);
 	xfree (config_ptr->feature);
+	build_config_feature_array(config_ptr);
 	xfree (config_ptr->nodes);
 	FREE_NULL_BITMAP (config_ptr->node_bitmap);
 	xfree (config_ptr);
@@ -1277,19 +1278,24 @@ static int _update_node_features(char *node_names, char *features)
 				config_ptr->feature = xstrdup(features);
 			else
 				config_ptr->feature = NULL;
+			build_config_feature_array(config_ptr);
 		} else {
 			/* partial update, split config_record */
 			new_config_ptr = create_config_record();
 			if (first_new == NULL)
 				first_new = new_config_ptr;
-			memcpy(new_config_ptr, config_ptr, 
-				sizeof(struct config_record));
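+			/* Copy fields individually; feature, feature_array,
+			 * nodes and node_bitmap are set separately below */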
+			new_config_ptr->magic       = config_ptr->magic;
+			new_config_ptr->cpus        = config_ptr->cpus;
+			new_config_ptr->sockets     = config_ptr->sockets;
+			new_config_ptr->cores       = config_ptr->cores;
+			new_config_ptr->threads     = config_ptr->threads;
+			new_config_ptr->real_memory = config_ptr->real_memory;
+			new_config_ptr->tmp_disk    = config_ptr->tmp_disk;
+			new_config_ptr->weight      = config_ptr->weight;
 			if (features[0])
 				new_config_ptr->feature = xstrdup(features);
-			else
-				config_ptr->feature = NULL;
-			new_config_ptr->node_bitmap = 
-				bit_copy(tmp_bitmap);
+			build_config_feature_array(new_config_ptr);
+			new_config_ptr->node_bitmap = bit_copy(tmp_bitmap);
 			new_config_ptr->nodes = 
 				bitmap2node_name(tmp_bitmap);
 			_update_config_ptr(tmp_bitmap, new_config_ptr);
@@ -1595,10 +1601,11 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 						      slurmctld_cluster_name,
 						      node_ptr, now);
 		} else if ((base_state == NODE_STATE_DOWN) &&
-		           (slurmctld_conf.ret2service == 1) &&
-			   (node_ptr->reason != NULL) && 
-			   (strncmp(node_ptr->reason, "Not responding", 14) 
-					== 0)) {
+			   ((slurmctld_conf.ret2service == 2) ||
+		            ((slurmctld_conf.ret2service == 1) &&
+			     (node_ptr->reason != NULL) && 
+			     (strncmp(node_ptr->reason, "Not responding", 14) 
+					== 0)))) {
 			last_node_update = time (NULL);
 			if (reg_msg->job_count) {
 				node_ptr->node_state = NODE_STATE_ALLOCATED |
@@ -2078,8 +2085,6 @@ void set_node_down (char *name, char *reason)
 		return;
 	}
 
-	_make_node_down(node_ptr, now);
-	(void) kill_running_job_by_node_name(name, false);
 	if ((node_ptr->reason == NULL)
 	||  (strncmp(node_ptr->reason, "Not responding", 14) == 0)) {
 		time_t now;
@@ -2093,6 +2098,8 @@ void set_node_down (char *name, char *reason)
 		node_ptr->reason = xstrdup(reason);
 		xstrcat(node_ptr->reason, time_buf);
 	}
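+	/* Mark the node down and kill its jobs only after the reason
+	 * string has been recorded */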
+	_make_node_down(node_ptr, now);
+	(void) kill_running_job_by_node_name(name, false);
 
 	return;
 }
@@ -2446,3 +2453,55 @@ void node_fini(void)
 	xfree(node_hash_table);
 	node_record_count = 0;
 }
+
+extern int send_nodes_to_accounting(time_t event_time)
+{
+	int rc = SLURM_SUCCESS, i = 0;
+	struct node_record *node_ptr;
+	/* Report nodes that are not in an 'up' state (DOWN, DRAIN or FAIL) */
+	node_ptr = node_record_table_ptr;
+	for (i = 0; i < node_record_count; i++, node_ptr++) {
+		if (node_ptr->name == '\0'
+		    || (!(node_ptr->node_state & NODE_STATE_DRAIN)
+			&& !(node_ptr->node_state & NODE_STATE_FAIL) 
+			&& (node_ptr->node_state & NODE_STATE_BASE) 
+			!= NODE_STATE_DOWN))
+			continue;
+
+		if((rc = clusteracct_storage_g_node_down(acct_db_conn,
+							 slurmctld_cluster_name,
+							 node_ptr, event_time,
+							 NULL))
+		   == SLURM_ERROR) 
+			break;
+	}
+	return rc;
+}
+
+/* Given a config_record, clear any existing feature_array and,
+ * if feature is set, rebuild the feature_array from the feature string */
+extern void build_config_feature_array(struct config_record *config_ptr)
+{
+	int i;
+	char *tmp_str, *token, *last = NULL;
+
+	/* clear any old feature_array */
+	if (config_ptr->feature_array) {
+		for (i=0; config_ptr->feature_array[i]; i++)
+			xfree(config_ptr->feature_array[i]);
+		xfree(config_ptr->feature_array);
+	}
+
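+	/* Split the comma-separated feature string into a NULL-terminated
+	 * array of feature names */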
+	if (config_ptr->feature) {
+		i = strlen(config_ptr->feature) + 1;	/* oversized */
+		config_ptr->feature_array = xmalloc(i * sizeof(char *));
+		tmp_str = xstrdup(config_ptr->feature);
+		i = 0;
+		token = strtok_r(tmp_str, ",", &last);
+		while (token) {
+			config_ptr->feature_array[i++] = xstrdup(token);
+			token = strtok_r(NULL, ",", &last);
+		}
+		xfree(tmp_str);
+	}
+}
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 6392dace2..48307eb7b 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -83,18 +83,17 @@ struct node_set {		/* set of nodes with same configuration */
 	uint32_t nodes;
 	uint32_t weight;
 	char     *features;
+	char	**feature_array;	/* POINTER, NOT COPIED */
 	bitstr_t *feature_bits;
 	bitstr_t *my_bitmap;
 };
 
-static int  _build_feature_list(struct job_record *job_ptr);
 static int  _build_node_list(struct job_record *job_ptr, 
 			     struct node_set **node_set_pptr,
 			     int *node_set_size);
-static void _feature_list_delete(void *x);
 static void _filter_nodes_in_set(struct node_set *node_set_ptr,
 				 struct job_details *detail_ptr);
-static int _match_feature(char *seek, char *available);
+static int _match_feature(char *seek, struct node_set *node_set_ptr);
 static int _nodes_in_sets(bitstr_t *req_bitmap, 
 			  struct node_set * node_set_ptr, 
 			  int node_set_size);
@@ -104,9 +103,8 @@ static int _pick_best_nodes(struct node_set *node_set_ptr,
 			    struct part_record *part_ptr,
 			    uint32_t min_nodes, uint32_t max_nodes,
 			    uint32_t req_nodes, bool test_only);
-static void _print_feature_list(uint32_t job_id, List feature_list);
 static bitstr_t *_valid_features(struct job_details *detail_ptr, 
-				 char *available);
+				 struct config_record *config_ptr);
 
 
 /*
@@ -223,32 +221,23 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
 /*
  * _match_feature - determine if the desired feature is one of those available
  * IN seek - desired feature
- * IN available - comma separated list of available features
+ * IN node_set_ptr - Pointer to node_set being searched
  * RET 1 if found, 0 otherwise
  */
-static int _match_feature(char *seek, char *available)
+static int _match_feature(char *seek, struct node_set *node_set_ptr)
 {
-	char *tmp_available = NULL, *str_ptr3 = NULL, *str_ptr4 = NULL;
-	int found;
+	int i;
 
 	if (seek == NULL)
 		return 1;	/* nothing to look for */
-	if (available == NULL)
+	if (node_set_ptr->feature_array == NULL)
 		return 0;	/* nothing to find */
 
-	tmp_available = xstrdup(available);
-	found = 0;
-	str_ptr3 = (char *) strtok_r(tmp_available, ",", &str_ptr4);
-	while (str_ptr3) {
-		if (strcmp(seek, str_ptr3) == 0) {	/* we have a match */
-			found = 1;
-			break;
-		}
-		str_ptr3 = (char *) strtok_r(NULL, ",", &str_ptr4);
+	for (i=0; node_set_ptr->feature_array[i]; i++) {
+		if (strcmp(seek, node_set_ptr->feature_array[i]) == 0)
+			return 1;	/* this is it */
 	}
-
-	xfree(tmp_available);
-	return found;
+	return 0;	/* not found */
 }
 
 
@@ -345,7 +334,8 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 	saved_req_nodes = req_nodes;
 	saved_job_min_nodes = job_ptr->details->min_nodes;
 	if (job_ptr->details->req_node_bitmap) {
-		saved_req_node_bitmap = job_ptr->details->req_node_bitmap;
+		accumulate_bitmap = job_ptr->details->req_node_bitmap;
+		saved_req_node_bitmap = bit_copy(accumulate_bitmap);
 		job_ptr->details->req_node_bitmap = NULL;
 	}
 	saved_num_procs = job_ptr->num_procs;
@@ -360,7 +350,7 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 		ListIterator feat_iter;
 		struct feature_record *feat_ptr;
 		feat_iter = list_iterator_create(job_ptr->details->feature_list);
-		while((feat_ptr = (struct feature_record *)
+		while ((feat_ptr = (struct feature_record *)
 				list_next(feat_iter))) {
 			if (feat_ptr->count == 0)
 				continue;
@@ -370,7 +360,7 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 			 * purge it */
 			for (i=0; i<node_set_size; i++) {
 				if (!_match_feature(feat_ptr->name, 
-						node_set_ptr[i].features))
+						    node_set_ptr+i))
 					continue;
 				tmp_node_set_ptr[tmp_node_set_size].cpus_per_node =
 					node_set_ptr[i].cpus_per_node;
@@ -382,6 +372,8 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 					node_set_ptr[i].weight;
 				tmp_node_set_ptr[tmp_node_set_size].features = 
 					xstrdup(node_set_ptr[i].features);
+				tmp_node_set_ptr[tmp_node_set_size].feature_array =
+					node_set_ptr[i].feature_array;
 				tmp_node_set_ptr[tmp_node_set_size].feature_bits = 
 					bit_copy(node_set_ptr[i].feature_bits);
 				tmp_node_set_ptr[tmp_node_set_size].my_bitmap = 
@@ -414,6 +406,13 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 			if (error_code != SLURM_SUCCESS)
 				break;
 			if (feature_bitmap) {
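+				/* Nodes selected for this feature count become
+				 * required nodes for the remaining passes */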
+				if (job_ptr->details->req_node_bitmap) {
+					bit_or(job_ptr->details->req_node_bitmap,
+					       feature_bitmap);
+				} else {
+					job_ptr->details->req_node_bitmap =
+						bit_copy(feature_bitmap);
+				}
 				if (accumulate_bitmap) {
 					bit_or(accumulate_bitmap, feature_bitmap);
 					bit_free(feature_bitmap);
@@ -425,34 +424,61 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 	}
 
 	/* restore most of job state and accumulate remaining resources */
-	min_nodes = saved_min_nodes;
-	req_nodes = saved_req_nodes;
-	job_ptr->details->min_nodes = saved_job_min_nodes;
-	job_ptr->num_procs = saved_num_procs;
 	if (saved_req_node_bitmap) {
 		FREE_NULL_BITMAP(job_ptr->details->req_node_bitmap);
 		job_ptr->details->req_node_bitmap = 
 				bit_copy(saved_req_node_bitmap);
 	}
 	if (accumulate_bitmap) {
+		uint32_t node_cnt;
 		if (job_ptr->details->req_node_bitmap) {
 			bit_or(job_ptr->details->req_node_bitmap, 
 				accumulate_bitmap);
 			FREE_NULL_BITMAP(accumulate_bitmap);
 		} else
 			job_ptr->details->req_node_bitmap = accumulate_bitmap;
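+		/* The accumulated required nodes may exceed the original
+		 * request; raise the node and processor minimums to match */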
+		node_cnt = bit_set_count(job_ptr->details->req_node_bitmap);
+		job_ptr->num_procs = MAX(saved_num_procs, node_cnt);
+		min_nodes = MAX(saved_min_nodes, node_cnt);
+		job_ptr->details->min_nodes = min_nodes;
+		req_nodes = MAX(min_nodes, req_nodes);
+		if (req_nodes > max_nodes)
+			error_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
+	} else {
+		min_nodes = saved_min_nodes;
+		req_nodes = saved_req_nodes;
+		job_ptr->num_procs = saved_num_procs;
+		job_ptr->details->min_nodes = saved_job_min_nodes;
 	}
+#if 0
+{
+	char *tmp_str = bitmap2node_name(job_ptr->details->req_node_bitmap);
+	info("job %u requires %d:%d:%d nodes %s err:%u", 
+	     job_ptr->job_id, min_nodes, req_nodes, max_nodes,
+	     tmp_str, error_code);
+	xfree(tmp_str);
+}
+#endif
 	xfree(tmp_node_set_ptr);
 	if (error_code == SLURM_SUCCESS) {
 		error_code = _pick_best_nodes(node_set_ptr, node_set_size,
 				select_bitmap, job_ptr, part_ptr, min_nodes, 
 				max_nodes, req_nodes, test_only);
 	}
+#if 0
+{
+	char *tmp_str = bitmap2node_name(*select_bitmap);
+	info("job %u allocated nodes %s err:%u", 
+		job_ptr->job_id, tmp_str, error_code);
+	xfree(tmp_str);
+}
+#endif
 
 	/* restore job's initial required node bitmap */
 	FREE_NULL_BITMAP(job_ptr->details->req_node_bitmap);
 	job_ptr->details->req_node_bitmap = saved_req_node_bitmap;
-
+	job_ptr->num_procs = saved_num_procs;
+	job_ptr->details->min_nodes = saved_job_min_nodes;
 
 	return error_code;
 }
@@ -627,7 +653,6 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 	 * features (possibly with node counts). */
 	for (j = min_feature; j <= max_feature; j++) {
 		for (i = 0; i < node_set_size; i++) {
-
 			if (!bit_test(node_set_ptr[i].feature_bits, j))
 				continue;
 
@@ -689,6 +714,19 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 						      max_nodes,
 						      req_nodes,
 						      select_mode);
+#if 0
+{
+			char *tmp_str1 = bitmap2node_name(backup_bitmap);
+			char *tmp_str2 = bitmap2node_name(avail_bitmap);
+			info("pick job:%u err:%d nodes:%u:%u:%u mode:%u "
+			     "select %s of %s", 
+			     job_ptr->job_id, pick_code, 
+			     min_nodes, req_nodes, max_nodes, select_mode, 
+			     tmp_str2, tmp_str1);
+			xfree(tmp_str1);
+			xfree(tmp_str2);
+}
+#endif
 			if (pick_code == SLURM_SUCCESS) {
 				FREE_NULL_BITMAP(backup_bitmap);
 				if (bit_set_count(avail_bitmap) > max_nodes) {
@@ -787,7 +825,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		error_code = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
 	if (!runable_ever) {
 		error_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-		info("_pick_best_nodes %u : job never runnable", job_ptr->job_id);
+		info("_pick_best_nodes: job %u never runnable", job_ptr->job_id);
 	}
 
 	if (error_code == SLURM_SUCCESS) {
@@ -978,7 +1016,8 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	if (job_ptr->mail_type & MAIL_JOB_BEGIN)
 		mail_job_info(job_ptr, MAIL_JOB_BEGIN);
 
-	jobacct_storage_g_job_start(acct_db_conn, job_ptr);
+	jobacct_storage_g_job_start(
+		acct_db_conn, slurmctld_cluster_name, job_ptr);
 
 	slurm_sched_newalloc(job_ptr);
 
@@ -998,162 +1037,6 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	return error_code;
 }
 
-static void _print_feature_list(uint32_t job_id, List feature_list)
-{
-	ListIterator feat_iter;
-	struct feature_record *feat_ptr;
-	char *buf = NULL, tmp[16];
-	int bracket = 0;
-
-	if (feature_list == NULL) {
-		info("Job %u feature list is empty", job_id);
-		return;
-	}
-
-	feat_iter = list_iterator_create(feature_list);
-	while((feat_ptr = (struct feature_record *)list_next(feat_iter))) {
-		if (feat_ptr->op_code == FEATURE_OP_XOR) {
-			if (bracket == 0)
-				xstrcat(buf, "[");
-			bracket = 1;
-		}
-		xstrcat(buf, feat_ptr->name);
-		if (feat_ptr->count) {
-			snprintf(tmp, sizeof(tmp), "*%u", feat_ptr->count);
-			xstrcat(buf, tmp);
-		}
-		if (bracket && (feat_ptr->op_code != FEATURE_OP_XOR)) {
-			xstrcat(buf, "]");
-			bracket = 0;
-		}
-		if (feat_ptr->op_code == FEATURE_OP_AND)
-			xstrcat(buf, "&");
-		else if ((feat_ptr->op_code == FEATURE_OP_OR) ||
-			 (feat_ptr->op_code == FEATURE_OP_XOR))
-			xstrcat(buf, "|");
-	}
-	list_iterator_destroy(feat_iter);
-	info("Job %u feature list: %s", job_id, buf);
-	xfree(buf);
-}
-
-static void _feature_list_delete(void *x)
-{
-	struct feature_record *feature = (struct feature_record *)x;
-	xfree(feature->name);
-	xfree(feature);
-}
-
-/*
- * _build_feature_list - Translate a job's feature string into a feature_list
- * IN  details->features
- * OUT details->feature_list
- * RET error code
- */
-static int _build_feature_list(struct job_record *job_ptr)
-{
-	struct job_details *detail_ptr = job_ptr->details;
-	char *tmp_requested, *str_ptr1, *str_ptr2, *feature = NULL;
-	int bracket = 0, count = 0, i;
-	bool have_count = false, have_or = false;
-	struct feature_record *feat;
-
-	if (detail_ptr->features == NULL)	/* no constraints */
-		return SLURM_SUCCESS;
-	if (detail_ptr->feature_list)		/* already processed */
-		return SLURM_SUCCESS;
-
-	tmp_requested = xstrdup(detail_ptr->features);
-	str_ptr1 = tmp_requested;
-	detail_ptr->feature_list = list_create(_feature_list_delete);
-	for (i=0; ; i++) {
-		if (tmp_requested[i] == '*') {
-			tmp_requested[i] = '\0';
-			have_count = true;
-			count = strtol(&tmp_requested[i+1], &str_ptr2, 10);
-			if ((feature == NULL) || (count <= 0)) {
-				info("Job %u invalid constraint %s", 
-					job_ptr->job_id, detail_ptr->features);
-				xfree(tmp_requested);
-				return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-			}
-			i = str_ptr2 - tmp_requested - 1;
-		} else if (tmp_requested[i] == '&') {
-			tmp_requested[i] = '\0';
-			if ((feature == NULL) || (bracket != 0)) {
-				info("Job %u invalid constraint %s", 
-					job_ptr->job_id, detail_ptr->features);
-				xfree(tmp_requested);
-				return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-			}
-			feat = xmalloc(sizeof(struct feature_record));
-			feat->name = xstrdup(feature);
-			feat->count = count;
-			feat->op_code = FEATURE_OP_AND;
-			list_append(detail_ptr->feature_list, feat);
-			feature = NULL;
-			count = 0;
-		} else if (tmp_requested[i] == '|') {
-			tmp_requested[i] = '\0';
-			have_or = true;
-			if (feature == NULL) {
-				info("Job %u invalid constraint %s", 
-					job_ptr->job_id, detail_ptr->features);
-				xfree(tmp_requested);
-				return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-			}
-			feat = xmalloc(sizeof(struct feature_record));
-			feat->name = xstrdup(feature);
-			feat->count = count;
-			if (bracket)
-				feat->op_code = FEATURE_OP_XOR;
-			else
-				feat->op_code = FEATURE_OP_OR;
-			list_append(detail_ptr->feature_list, feat);
-			feature = NULL;
-			count = 0;
-		} else if (tmp_requested[i] == '[') {
-			tmp_requested[i] = '\0';
-			if ((feature != NULL) || bracket) {
-				info("Job %u invalid constraint %s", 
-					job_ptr->job_id, detail_ptr->features);
-				xfree(tmp_requested);
-				return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-			}
-			bracket++;
-		} else if (tmp_requested[i] == ']') {
-			tmp_requested[i] = '\0';
-			if ((feature == NULL) || (bracket == 0)) {
-				info("Job %u invalid constraint %s", 
-					job_ptr->job_id, detail_ptr->features);
-				xfree(tmp_requested);
-				return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-			}
-			bracket = 0;
-		} else if (tmp_requested[i] == '\0') {
-			if (feature) {
-				feat = xmalloc(sizeof(struct feature_record));
-				feat->name = xstrdup(feature);
-				feat->count = count;
-				feat->op_code = FEATURE_OP_END;
-				list_append(detail_ptr->feature_list, feat);
-			}
-			break;
-		} else if (feature == NULL) {
-			feature = &tmp_requested[i];
-		}
-	}
-	xfree(tmp_requested);
-	if (have_count && have_or) {
-		info("Job %u invalid constraint (OR with feature count): %s", 
-			job_ptr->job_id, detail_ptr->features);
-		return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-	}
-
-	_print_feature_list(job_ptr->job_id, detail_ptr->feature_list);
-	return SLURM_SUCCESS;
-}
-
 /*
  * job_req_node_filter - job request node filter.
  *	clear from a bitmap the nodes which cannot be used for a job
@@ -1179,8 +1062,6 @@ extern int job_req_node_filter(struct job_record *job_ptr,
 		      job_ptr->job_id);
 		return EINVAL;
 	}
-	if (_build_feature_list(job_ptr))
-		return EINVAL;
 
 	mc_ptr = detail_ptr->mc_ptr;
 	for (i=0; i< node_record_count; i++) {
@@ -1188,7 +1069,7 @@ extern int job_req_node_filter(struct job_record *job_ptr,
 			continue;
 		node_ptr = node_record_table_ptr + i;
 		config_ptr = node_ptr->config_ptr;
-		feature_bitmap = _valid_features(detail_ptr, config_ptr->feature);
+		feature_bitmap = _valid_features(detail_ptr, config_ptr);
 		if ((feature_bitmap == NULL) || (!bit_test(feature_bitmap, 0))) {
 			bit_clear(avail_bitmap, i);
 			continue;
@@ -1261,11 +1142,6 @@ static int _build_node_list(struct job_record *job_ptr,
 	multi_core_data_t *mc_ptr = detail_ptr->mc_ptr;
 	bitstr_t *tmp_feature;
 
-	if (detail_ptr->features && (detail_ptr->feature_list == NULL)) {
-		int error_code = _build_feature_list(job_ptr);
-		if (error_code)
-			return error_code;
-	}
 	node_set_inx = 0;
 	node_set_ptr = (struct node_set *) 
 			xmalloc(sizeof(struct node_set) * 2);
@@ -1333,8 +1209,7 @@ static int _build_node_list(struct job_record *job_ptr,
 			continue;
 		}
 
-		tmp_feature = _valid_features(job_ptr->details,
-					      config_ptr->feature);
+		tmp_feature = _valid_features(job_ptr->details, config_ptr);
 		if (tmp_feature == NULL) {
 			FREE_NULL_BITMAP(node_set_ptr[node_set_inx].my_bitmap);
 			continue;
@@ -1349,6 +1224,8 @@ static int _build_node_list(struct job_record *job_ptr,
 			config_ptr->weight;
 		node_set_ptr[node_set_inx].features = 
 			xstrdup(config_ptr->feature);
+		node_set_ptr[node_set_inx].feature_array = 
+			config_ptr->feature_array;	/* NOTE: NOT COPIED */
 		node_set_ptr[node_set_inx].feature_bits = tmp_feature;
 		debug2("found %d usable nodes from config containing %s",
 		       node_set_ptr[node_set_inx].nodes, config_ptr->nodes);
@@ -1479,6 +1356,22 @@ static int _nodes_in_sets(bitstr_t *req_bitmap,
 	return error_code;
 }
 
+/* Update record of a job's allocated processors for each step */
+static void _alloc_step_cpus(struct job_record *job_ptr)
+{
+	ListIterator step_iterator;
+	struct step_record *step_ptr;
+
+	if (job_ptr->step_list == NULL)
+		return;
+
+	step_iterator = list_iterator_create(job_ptr->step_list);
+	while ((step_ptr = (struct step_record *) list_next(step_iterator))) {
+		step_alloc_lps(step_ptr);
+	}
+	list_iterator_destroy(step_iterator);
+}
+
 /*
  * build_node_details - set cpu counts and addresses for allocated nodes:
  *	cpu_count_reps, cpus_per_node, node_addr, node_cnt, num_cpu_groups
@@ -1599,13 +1492,15 @@ extern void build_node_details(struct job_record *job_ptr)
 	}
 	job_ptr->num_cpu_groups = cpu_inx + 1;
 	job_ptr->total_procs = total_procs;
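+	/* Re-apply per-step processor allocations after rebuilding the
+	 * job's node details */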
+	if (job_ptr->used_lps)	/* reset counters */
+		_alloc_step_cpus(job_ptr);
 }
 
 /*
  * _valid_features - determine if the requested features are satisfied by
- *	those available
+ *	the available nodes
  * IN details_ptr - job requirement details, includes requested features
- * IN available - available features (on a node)
+ * IN config_ptr - node's configuration record
  * RET NULL if request is not satisfied, otherwise a bitmap indicating 
  *	which mutually exclusive features are satisfied. For example
  *	_valid_features("[fs1|fs2|fs3|fs4]", "fs3") returns a bitmap with
@@ -1616,7 +1511,7 @@ extern void build_node_details(struct job_record *job_ptr)
  *	mutually exclusive feature list.
  */
 static bitstr_t *_valid_features(struct job_details *details_ptr, 
-				 char *available)
+				 struct config_record *config_ptr)
 {
 	bitstr_t *result_bits = (bitstr_t *) NULL;
 	ListIterator feat_iter;
@@ -1634,10 +1529,19 @@ static bitstr_t *_valid_features(struct job_details *details_ptr,
 	last_op = FEATURE_OP_AND;
 	feat_iter = list_iterator_create(details_ptr->feature_list);
 	while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) {
+		found = 0;
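+		/* Features with a node count are matched elsewhere (see
+		 * _get_req_features()); treat them as satisfied here */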
 		if (feat_ptr->count)
-			found = 1;	/* handle feature counts elsewhere */
-		else
-			found = _match_feature(feat_ptr->name, available);
+			found = 1;
+		else if (config_ptr->feature_array) {
+			int i;
+			for (i=0; config_ptr->feature_array[i]; i++) {
+				if (strcmp(feat_ptr->name, 
+					   config_ptr->feature_array[i]))
+					continue;
+				found = 1;
+				break;
+			}
+		}
 
 		if ((last_op == FEATURE_OP_XOR) ||
 		    (feat_ptr->op_code == FEATURE_OP_XOR)) {
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index cd86e7d17..6a0b0976f 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -2,7 +2,7 @@
  *  partition_mgr.c - manage the partition information of slurm
  *	Note: there is a global partition list (part_list) and
  *	time stamp (last_part_update)
- *  $Id: partition_mgr.c 14795 2008-08-15 21:54:22Z jette $
+ *  $Id: partition_mgr.c 15121 2008-09-19 18:31:06Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -1093,11 +1093,12 @@ uid_t *_get_group_members(char *group_name)
 		}
 	}
 
-	setpwent();
 #ifdef HAVE_AIX
+	setpwent_r(&fp);
 	while (!getpwent_r(&pw, pw_buffer, PW_BUF_SIZE, &fp)) {
 		pwd_result = &pw;
 #else
+	setpwent();
 	while (!getpwent_r(&pw, pw_buffer, PW_BUF_SIZE, &pwd_result)) {
 #endif
  		if (pwd_result->pw_gid != my_gid)
@@ -1106,7 +1107,11 @@ uid_t *_get_group_members(char *group_name)
  		xrealloc(group_uids, ((j+1) * sizeof(uid_t)));
 		group_uids[j-1] = pwd_result->pw_uid;
 	}
+#ifdef HAVE_AIX
+	endpwent_r(&fp);
+#else
 	endpwent();
+#endif
 
 	return group_uids;
 }
diff --git a/src/slurmctld/power_save.c b/src/slurmctld/power_save.c
index 4e6d3edba..ec454af5f 100644
--- a/src/slurmctld/power_save.c
+++ b/src/slurmctld/power_save.c
@@ -173,6 +173,7 @@ static void _re_wake(void)
 	struct node_record *node_ptr;
 	bitstr_t *wake_node_bitmap = NULL;
 	int i, lim = MIN(node_record_count, 20);
+	uint16_t base_state, susp_state;
 
 	/* Run at most once per minute */
 	if ((now - last_wakeup) < 60)
@@ -181,7 +182,12 @@ static void _re_wake(void)
 
 	for (i=0; i<lim; i++) {
 		node_ptr = &node_record_table_ptr[last_inx];
-		if ((node_ptr->node_state & NODE_STATE_POWER_SAVE) == 0) {
+		base_state = node_ptr->node_state & NODE_STATE_BASE;
+		susp_state = node_ptr->node_state & NODE_STATE_POWER_SAVE;
+
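+		/* Wake only nodes that are allocated or idle and are not
+		 * already flagged for power save */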
+		if ((susp_state == 0) &&
+		    ((base_state == NODE_STATE_ALLOCATED) ||
+		     (base_state == NODE_STATE_IDLE))) {
 			if (wake_node_bitmap == NULL)
 				wake_node_bitmap = bit_alloc(node_record_count);
 			bit_set(wake_node_bitmap, last_inx);
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 43e396a35..55d46b0d5 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -129,6 +129,7 @@ inline static void  _slurm_rpc_end_time(slurm_msg_t * msg);
 inline static void  _update_cred_key(void);
 inline static void  _slurm_rpc_set_debug_level(slurm_msg_t *msg);
 inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg);
+inline static void  _slurm_rpc_accounting_first_reg(slurm_msg_t *msg);
 
 
 /*
@@ -319,6 +320,10 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_accounting_update_msg(msg);
 		slurm_free_accounting_update_msg(msg->data);
 		break;
+	case ACCOUNTING_FIRST_REG:
+		_slurm_rpc_accounting_first_reg(msg);
+		/* No body to free */
+		break;
 	default:
 		error("invalid RPC msg_type=%d", msg->msg_type);
 		slurm_send_rc_msg(msg, EINVAL);
@@ -425,6 +430,8 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->resume_rate         = conf->resume_rate;
 	conf_ptr->ret2service         = conf->ret2service;
 
+	conf_ptr->salloc_default_command = xstrdup(conf->
+						   salloc_default_command);
 	if (conf->sched_params)
 		conf_ptr->sched_params = xstrdup(conf->sched_params);
 	else
@@ -518,8 +525,11 @@ static int _make_step_cred(struct step_record *step_rec,
 	cred_arg.uid      = job_ptr->user_id;
 	cred_arg.job_mem  = job_ptr->details->job_min_memory;
 	cred_arg.task_mem = step_rec->mem_per_task;
+#ifdef HAVE_FRONT_END
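+	/* On front-end systems the credential hostlist is the single
+	 * front-end node (node zero) */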
+	cred_arg.hostlist = node_record_table_ptr[0].name;
+#else
 	cred_arg.hostlist = step_rec->step_layout->node_list;
-	
+#endif
 	cred_arg.alloc_lps_cnt = job_ptr->alloc_lps_cnt;
 	if ((cred_arg.alloc_lps_cnt > 0) &&
 	    bit_equal(job_ptr->node_bitmap, step_rec->step_node_bitmap)) {
@@ -553,8 +563,7 @@ static int _make_step_cred(struct step_record *step_rec,
 		cred_arg.alloc_lps = NULL;
 	}
 
-	*slurm_cred = slurm_cred_create(slurmctld_config.cred_ctx, 
-			&cred_arg);
+	*slurm_cred = slurm_cred_create(slurmctld_config.cred_ctx, &cred_arg);
 	xfree(cred_arg.alloc_lps);
 	if (*slurm_cred == NULL) {
 		error("slurm_cred_create error");
@@ -845,7 +854,7 @@ static void _slurm_rpc_dump_nodes(slurm_msg_t * msg)
 	&&  (!validate_super_user(uid))) {
 		unlock_slurmctld(node_read_lock);
 		error("Security violation, REQUEST_NODE_INFO RPC from uid=%d", uid);
-		slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
+		slurm_send_rc_msg(msg, ESLURM_ACCESS_DENIED);
 	} else if ((node_req_msg->last_update - 1) >= last_node_update) {
 		unlock_slurmctld(node_read_lock);
 		debug2("_slurm_rpc_dump_nodes, no change");
@@ -895,7 +904,7 @@ static void _slurm_rpc_dump_partitions(slurm_msg_t * msg)
 	&&  (!validate_super_user(uid))) {
 		unlock_slurmctld(part_read_lock);
 		debug2("Security violation, PARTITION_INFO RPC from uid=%d", uid);
-		slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
+		slurm_send_rc_msg(msg, ESLURM_ACCESS_DENIED);
 	} else if ((part_req_msg->last_update - 1) >= last_part_update) {
 		unlock_slurmctld(part_read_lock);
 		debug2("_slurm_rpc_dump_partitions, no change");
@@ -1741,7 +1750,8 @@ static void _slurm_rpc_shutdown_controller_immediate(slurm_msg_t * msg)
  *	represent the termination of an entire job */
 static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 {
-	int error_code = SLURM_SUCCESS, rc, rem, step_rc;
+	int error_code = SLURM_SUCCESS, rc, rem;
+	uint32_t step_rc;
 	DEF_TIMERS;
 	step_complete_msg_t *req = (step_complete_msg_t *)msg->data;
 	/* Locks: Write job, write node */
@@ -1800,7 +1810,7 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 		}
 	} else {
 		error_code = job_step_complete(req->job_id, req->job_step_id,
-				uid, job_requeue, step_rc);
+					       uid, job_requeue, step_rc);
 		unlock_slurmctld(job_write_lock);
 		END_TIMER2("_slurm_rpc_step_complete");
 
@@ -2293,7 +2303,7 @@ static void  _slurm_rpc_node_select_info(slurm_msg_t * msg)
 	lock_slurmctld(config_read_lock);
 	if ((slurmctld_conf.private_data & PRIVATE_DATA_NODES)
 	&&  (!validate_super_user(uid))) {
-		error_code = ESLURM_USER_ID_MISSING;
+		error_code = ESLURM_ACCESS_DENIED;
 		error("Security violation, NODE_SELECT_INFO RPC from uid=%u",
 			(unsigned int) uid);
 	} 
@@ -2919,7 +2929,10 @@ inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg)
 	START_TIMER;
 	debug2("Processing RPC: ACCOUNTING_UPDATE_MSG from uid=%u",
 		(unsigned int) uid);
-	if (!validate_super_user(uid)) {
+
+	if (!validate_super_user(uid) 
+	    && (assoc_mgr_get_admin_level(acct_db_conn, uid)
+		< ACCT_ADMIN_SUPER_USER)) {
 		error("Update Association request from non-super user uid=%d", 
 		      uid);
 		slurm_send_rc_msg(msg, EACCES);
@@ -2944,10 +2957,11 @@ inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg)
 				rc = assoc_mgr_update_local_assocs(object);
 				break;
 			case ACCT_ADD_QOS:
+			case ACCT_MODIFY_QOS:
 			case ACCT_REMOVE_QOS:
-			case ACCT_UPDATE_NOTSET:
 				rc = assoc_mgr_update_local_qos(object);
 				break;
+			case ACCT_UPDATE_NOTSET:
 			default:
 				error("unknown type set in update_object: %d",
 				      object->type);
@@ -2962,3 +2976,24 @@ inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg)
 	slurm_send_rc_msg(msg, rc);
 }
 
+inline static void  _slurm_rpc_accounting_first_reg(slurm_msg_t *msg)
+{
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	time_t event_time = time(NULL);
+	
+	DEF_TIMERS;
+
+	START_TIMER;
+	debug2("Processing RPC: ACCOUNTING_FIRST_REG from uid=%u",
+		(unsigned int) uid);
+	if (!validate_super_user(uid)) {
+		error("First registration request from non-super user uid=%d",
+		      uid);
+		return;
+	}
+
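+	/* Push the current job and node state to accounting storage at
+	 * the cluster's first registration */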
+	send_jobs_to_accounting(event_time);
+	send_nodes_to_accounting(event_time);
+	
+	END_TIMER2("_slurm_rpc_accounting_first_reg");
+}
diff --git a/src/slurmctld/proc_req.h b/src/slurmctld/proc_req.h
index e3f3ed89b..fad93e53a 100644
--- a/src/slurmctld/proc_req.h
+++ b/src/slurmctld/proc_req.h
@@ -68,5 +68,6 @@ extern int slurm_drain_nodes(char *node_list, char *reason);
  *	own locks.
  */
 extern int slurm_fail_job(uint32_t job_id);
+
 #endif /* !_HAVE_PROC_REQ_H */
 
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index 8a31935ff..4c25afdfe 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -569,6 +569,7 @@ static int _build_all_nodeline_info(slurm_ctl_conf_t *conf)
 		config_ptr->weight = node->weight;
 		if (node->feature)
 			config_ptr->feature = xstrdup(node->feature);
+		build_config_feature_array(config_ptr);
 
 		_build_single_nodeline_info(node, config_ptr, conf);
 	}
@@ -1246,7 +1247,8 @@ static void _acct_restore_active_jobs(void)
 			jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
 		if ((job_ptr->job_state == JOB_SUSPENDED) ||
 		    (job_ptr->job_state == JOB_RUNNING)) {
-			jobacct_storage_g_job_start(acct_db_conn, job_ptr);
+			jobacct_storage_g_job_start(
+				acct_db_conn, slurmctld_cluster_name, job_ptr);
 			step_iterator = list_iterator_create(job_ptr->step_list);
 			while ((step_ptr = (struct step_record *) 
 					   list_next(step_iterator))) {
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 0fe112c08..da6cab3fb 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -166,6 +166,7 @@ extern int bg_recover;		/* state recovery mode */
 extern char *slurmctld_cluster_name; /* name of cluster */
 extern void *acct_db_conn;
 extern int accounting_enforce;
+extern int association_based_accounting;
 
 /*****************************************************************************\
  *  NODE parameters and data structures
@@ -184,6 +185,7 @@ struct config_record {
 	uint32_t weight;	/* arbitrary priority of node for 
 				 * scheduling work on */
 	char *feature;		/* arbitrary list of features associated */
+	char **feature_array;	/* array of feature names */
 	char *nodes;		/* name of nodes with this configuration */
 	bitstr_t *node_bitmap;	/* bitmap of nodes with this configuration */
 };
@@ -455,6 +457,7 @@ struct job_record {
 #define SLURM_DEPEND_AFTER_ANY		2
 #define SLURM_DEPEND_AFTER_NOT_OK	3
 #define SLURM_DEPEND_AFTER_OK		4
+#define SLURM_DEPEND_SINGLETON		5
 struct	depend_spec {
 	uint16_t	depend_type;	/* SLURM_DEPEND_* type */
 	uint32_t	job_id;		/* SLURM job_id */
@@ -544,6 +547,10 @@ extern void abort_job_on_node(uint32_t job_id, struct job_record *job_ptr,
  */
 extern char * bitmap2node_name (bitstr_t *bitmap) ;
 
+/* Given a config_record, clear any existing feature_array and,
+ * if feature is set, rebuild the feature_array from the feature string */
+extern void build_config_feature_array(struct config_record *config_ptr);
+
 /*
  * create_config_record - create a config_record entry and set is values to 
  *	the defaults. each config record corresponds to a line in the  
@@ -1309,6 +1316,16 @@ extern void run_health_check(void);
 /* save_all_state - save entire slurmctld state for later recovery */
 extern void save_all_state(void);
 
+/* Send all jobs in an eligible state to accounting. Only needed at
+ * first registration.
+ */
+extern int send_jobs_to_accounting(time_t event_time);
+
+/* Send all nodes in a DOWN-like state to accounting. Only needed at
+ * first registration.
+ */
+extern int send_nodes_to_accounting(time_t event_time);
+
 /*
  * set_node_down - make the specified node's state DOWN if possible
  *	(not in a DRAIN state), kill jobs as needed 
@@ -1411,7 +1428,7 @@ extern bool step_on_node(struct job_record  *job_ptr,
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int step_partial_comp(step_complete_msg_t *req, int *rem,
-		int *max_rc);
+			     uint32_t *max_rc);
 
 /* Update time stamps for job step suspend */
 extern void suspend_job_step(struct job_record *job_ptr);
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 11fa939f3..5037e3e3a 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  step_mgr.c - manage the job step information of slurm
- *  $Id: step_mgr.c 14621 2008-07-24 15:24:59Z jette $
+ *  $Id: step_mgr.c 15194 2008-09-26 20:15:00Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -1447,7 +1447,7 @@ extern int job_step_checkpoint_task_comp(checkpoint_task_comp_msg_t *ckpt_ptr,
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int step_partial_comp(step_complete_msg_t *req, int *rem, 
-		int *max_rc)
+			     uint32_t *max_rc)
 {
 	struct job_record *job_ptr;
 	struct step_record *step_ptr;
@@ -1484,7 +1484,7 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem,
 
 	jobacct_gather_g_aggregate(step_ptr->jobacct, req->jobacct);
 
-	if (step_ptr->exit_code == NO_VAL) {
+	if (!step_ptr->exit_node_bitmap) {
 		/* initialize the node bitmap for exited nodes */
 		nodes = bit_set_count(step_ptr->step_node_bitmap);
 		if (req->range_last >= nodes) {	/* range is zero origin */
@@ -1492,13 +1492,11 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem,
 				req->range_last, nodes);
 			return EINVAL;
 		}
-		xassert(step_ptr->exit_node_bitmap == NULL);
 		step_ptr->exit_node_bitmap = bit_alloc(nodes);
 		if (step_ptr->exit_node_bitmap == NULL)
 			fatal("bit_alloc: %m");
 		step_ptr->exit_code = req->step_rc;
 	} else {
-		xassert(step_ptr->exit_node_bitmap);
 		nodes = _bitstr_bits(step_ptr->exit_node_bitmap);
 		if (req->range_last >= nodes) {	/* range is zero origin */
 			error("step_partial_comp: last=%u, nodes=%d",
@@ -1508,8 +1506,7 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem,
 		step_ptr->exit_code = MAX(step_ptr->exit_code, req->step_rc);
 	}
 
-	bit_nset(step_ptr->exit_node_bitmap, req->range_first,
-		req->range_last);
+	bit_nset(step_ptr->exit_node_bitmap, req->range_first, req->range_last);
 	rem_nodes = bit_clear_count(step_ptr->exit_node_bitmap);
 	if (rem)
 		*rem = rem_nodes;
@@ -1843,7 +1840,7 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer)
 
 	if (step_ptr->step_layout && step_ptr->step_layout->node_list) {
 		switch_g_job_step_allocated(switch_tmp, 
-				    step_ptr->step_layout->node_list);
+					    step_ptr->step_layout->node_list);
 	} else {
 		switch_g_job_step_allocated(switch_tmp, NULL);
 	}
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index f46df89eb..9d91d9a92 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -906,11 +906,11 @@ _get_user_env(batch_job_launch_msg_t *req)
 	char **new_env;
 	int i;
 
-	for (i=0; i<req->argc; i++) {
-		if (strcmp(req->environment[0], "SLURM_GET_USER_ENV=1") == 0)
+	for (i=0; i<req->envc; i++) {
+		if (strcmp(req->environment[i], "SLURM_GET_USER_ENV=1") == 0)
 			break;
 	}
-	if (i >= req->argc)
+	if (i >= req->envc)
 		return;		/* don't need to load env */
 
 	if (getpwuid_r(req->uid, &pwd, pwd_buf, PW_BUF_SIZE, &pwd_ptr) ||
@@ -3193,7 +3193,7 @@ init_gids_cache(int cache)
 	getgroups(ngids, orig_gids);
 
 #ifdef HAVE_AIX
-	setpwent(&fp);
+	setpwent_r(&fp);
 	while (!getpwent_r(&pw, buf, BUF_SIZE, &fp)) {
 		pwd = &pw;
 #else
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c
index 08d94f8cc..2e54f4f8b 100644
--- a/src/slurmd/slurmd/slurmd.c
+++ b/src/slurmd/slurmd/slurmd.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  src/slurmd/slurmd/slurmd.c - main slurm node server daemon
- *  $Id: slurmd.c 14314 2008-06-23 20:57:56Z jette $
+ *  $Id: slurmd.c 15006 2008-09-08 20:47:15Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -589,9 +589,11 @@ _read_config()
 	 * valid aliases */
 	if (conf->node_name == NULL)
 		conf->node_name = slurm_conf_get_aliased_nodename();
-	if (conf->node_name == NULL)
+	
+	if (conf->node_name == NULL) 
 		conf->node_name = slurm_conf_get_nodename("localhost");
-	if (conf->node_name == NULL)
+
+	if (conf->node_name == NULL) 
 		fatal("Unable to determine this slurmd's NodeName");
 
 	_massage_pathname(&conf->logfile);
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index a92c1ae0d..8a423f349 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  src/slurmd/slurmstepd/mgr.c - job manager functions for slurmstepd
- *  $Id: mgr.c 14702 2008-08-05 22:18:13Z jette $
+ *  $Id: mgr.c 14994 2008-09-05 21:31:37Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
@@ -233,9 +233,25 @@ static void
 _batch_finish(slurmd_job_t *job, int rc)
 {
 	int i;
-	for (i = 0; i < job->ntasks; i++)
+	for (i = 0; i < job->ntasks; i++) {
+		/* If signalled, we only need to check one task and then
+		   break out of the loop */
+		if(WIFSIGNALED(job->task[i]->estatus)) {
+			switch(WTERMSIG(job->task[i]->estatus)) {
+			case SIGTERM:
+			case SIGKILL:
+			case SIGINT:
+				step_complete.step_rc = NO_VAL;
+				break;
+			default:
+				step_complete.step_rc = job->task[i]->estatus;
+				break;
+			}
+			break;
+		}
 		step_complete.step_rc = MAX(step_complete.step_rc,
 					    WEXITSTATUS(job->task[i]->estatus));
+	}
 
 	if (job->argv[0] && (unlink(job->argv[0]) < 0))
 		error("unlink(%s): %m", job->argv[0]);
@@ -494,10 +510,25 @@ _wait_for_children_slurmstepd(slurmd_job_t *job)
 	}
 
 	/* Find the maximum task return code */
-	for (i = 0; i < job->ntasks; i++)
+	for (i = 0; i < job->ntasks; i++) {
+		/* If signalled, we only need to check one task and then
+		   break out of the loop */
+		if(WIFSIGNALED(job->task[i]->estatus)) {
+			switch(WTERMSIG(job->task[i]->estatus)) {
+			case SIGTERM:
+			case SIGKILL:
+			case SIGINT:
+				step_complete.step_rc = NO_VAL;
+				break;
+			default:
+				step_complete.step_rc = job->task[i]->estatus;
+				break;
+			}
+			break;
+		}
 		step_complete.step_rc = MAX(step_complete.step_rc,
-					 WEXITSTATUS(job->task[i]->estatus));
-
+					    WEXITSTATUS(job->task[i]->estatus));
+	}
 	step_complete.wait_children = false;
 
 	pthread_mutex_unlock(&step_complete.lock);
diff --git a/src/slurmd/slurmstepd/req.c b/src/slurmd/slurmstepd/req.c
index ebfbd430a..d4d06d0b4 100644
--- a/src/slurmd/slurmstepd/req.c
+++ b/src/slurmd/slurmstepd/req.c
@@ -174,10 +174,16 @@ _domain_socket_create(const char *dir, const char *nodename,
 	 * First check to see if the named socket already exists.
 	 */
 	if (stat(name, &stat_buf) == 0) {
-		error("Socket %s already exists", name);
-		xfree(name);
-		errno = ESLURMD_STEP_EXISTS;
-		return -1;
+		/* Leftover socket from a slurmd crash or a job requeue that
+		 * did not complete properly (very rare). Try another name */
+		xstrcat(name, ".ALT");
+		if (stat(name, &stat_buf) == 0) {
+			error("Socket %s already exists", name);
+			xfree(name);
+			errno = ESLURMD_STEP_EXISTS;
+			return -1;
+		}
+		error("Using alternate socket name %s", name);
 	}
 
 	fd = _create_socket(name);
@@ -719,7 +725,8 @@ _handle_signal_container(int fd, slurmd_job_t *job, uid_t uid)
 		goto done;
 	}
 
-	if ((job->nodeid == 0) && (msg_sent == 0)) {
+	if ((job->nodeid == 0) && (msg_sent == 0) && 
+	    (job->state < SLURMSTEPD_STEP_ENDING)) {
 		time_t now = time(NULL);
 		char entity[24], time_str[24];
 		if (job->stepid == SLURM_BATCH_SCRIPT) {
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.c b/src/slurmd/slurmstepd/slurmstepd_job.c
index 89641b908..14de1ec02 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.c
+++ b/src/slurmd/slurmstepd/slurmstepd_job.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  * src/slurmd/slurmstepd/slurmstepd_job.c - slurmd_job_t routines
- * $Id: slurmstepd_job.c 14753 2008-08-12 22:40:54Z da $
+ * $Id: slurmstepd_job.c 15043 2008-09-09 23:45:19Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -378,11 +378,11 @@ job_batch_job_create(batch_job_launch_msg_t *msg)
 		job->argc    = msg->argc;
 		job->argv    = _array_copy(job->argc, msg->argv);
 	} else {
-		job->argc    = 2;
+		job->argc    = 1;
 		/* job script has not yet been written out to disk --
-		 * argv will be filled in later
+		 * argv will be filled in later by _make_batch_script()
 		 */
-		job->argv    = (char **) xmalloc(job->argc * sizeof(char *));
+		job->argv    = (char **) xmalloc(sizeof(char *));
 	}
 
 	job->task = (slurmd_task_info_t **)
diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c
index cad5fbb1a..aaff52381 100644
--- a/src/slurmdbd/proc_req.c
+++ b/src/slurmdbd/proc_req.c
@@ -45,88 +45,92 @@
 #include "src/common/uid.h"
 #include "src/slurmdbd/read_config.h"
 #include "src/slurmdbd/rpc_mgr.h"
+#include "src/slurmdbd/proc_req.h"
 #include "src/slurmctld/slurmctld.h"
 
 /* Local functions */
-static int   _add_accounts(void *db_conn,
+static int   _add_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _add_account_coords(void *db_conn,
-			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _add_assocs(void *db_conn,
+static int   _add_account_coords(slurmdbd_conn_t *slurmdbd_conn,
+				 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _add_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _add_clusters(void *db_conn,
+static int   _add_clusters(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _add_qos(void *db_conn,
+static int   _add_qos(slurmdbd_conn_t *slurmdbd_conn,
 		      Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _add_users(void *db_conn,
+static int   _add_users(slurmdbd_conn_t *slurmdbd_conn,
 			Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _cluster_procs(void *db_conn,
+static int   _cluster_procs(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_accounts(void *db_conn,
+static int   _get_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_assocs(void *db_conn,
+static int   _get_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_clusters(void *db_conn,
+static int   _get_clusters(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_jobs(void *db_conn,
+static int   _get_jobs(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_jobs_cond(void *db_conn,
+static int   _get_jobs_cond(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_qos(void *db_conn,
+static int   _get_qos(slurmdbd_conn_t *slurmdbd_conn,
 		      Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_txn(void *db_conn,
+static int   _get_txn(slurmdbd_conn_t *slurmdbd_conn,
 		      Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_usage(uint16_t type, void *db_conn,
+static int   _get_usage(uint16_t type, slurmdbd_conn_t *slurmdbd_conn,
 			Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_users(void *db_conn,
+static int   _get_users(slurmdbd_conn_t *slurmdbd_conn,
 			Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _flush_jobs(void *db_conn,
+static int   _flush_jobs(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static void *_init_conn(Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _fini_conn(void **db_conn, Buf in_buffer, Buf *out_buffer);
-static int   _job_complete(void *db_conn,
+static int   _init_conn(slurmdbd_conn_t *slurmdbd_conn, 
+			Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _fini_conn(slurmdbd_conn_t *slurmdbd_conn, Buf in_buffer,
+			Buf *out_buffer);
+static int   _job_complete(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _job_start(void *db_conn,
+static int   _job_start(slurmdbd_conn_t *slurmdbd_conn,
 			Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _job_suspend(void *db_conn,
+static int   _job_suspend(slurmdbd_conn_t *slurmdbd_conn,
 			  Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _modify_accounts(void *db_conn,
+static int   _modify_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			      Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _modify_assocs(void *db_conn,
+static int   _modify_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _modify_clusters(void *db_conn,
+static int   _modify_clusters(slurmdbd_conn_t *slurmdbd_conn,
 			      Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _modify_users(void *db_conn,
+static int   _modify_users(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _node_state(void *db_conn,
+static int   _node_state(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static char *_node_state_string(uint16_t node_state);
-static int   _register_ctld(void *db_conn, slurm_fd orig_fd,
+static int   _register_ctld(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _remove_accounts(void *db_conn,
+static int   _remove_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			      Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _remove_account_coords(void *db_conn,
+static int   _remove_account_coords(slurmdbd_conn_t *slurmdbd_conn,
 				    Buf in_buffer, Buf *out_buffer,
 				    uint32_t *uid);
-static int   _remove_assocs(void *db_conn,
+static int   _remove_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _remove_clusters(void *db_conn,
+static int   _remove_clusters(slurmdbd_conn_t *slurmdbd_conn,
 			      Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _remove_qos(void *db_conn,
+static int   _remove_qos(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _remove_users(void *db_conn,
+static int   _remove_users(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _roll_usage(void *db_conn,
+static int   _roll_usage(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _step_complete(void *db_conn,
+static int   _step_complete(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _step_start(void *db_conn,
+static int   _step_start(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _update_shares_used(void *db_conn,
+static int   _update_shares_used(slurmdbd_conn_t *slurmdbd_conn,
 				 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 
 /* Process an incoming RPC
- * orig_fd IN - originating file descriptor of the RPC
+ * slurmdbd_conn IN/OUT - newsockfd must be set before calling;
+ *       db_conn and rpc_version are filled in when DBD_INIT is processed.
  * msg IN - incoming message
  * msg_size IN - size of msg in bytes
  * first IN - set if first message received on the socket
@@ -134,7 +138,7 @@ static int   _update_shares_used(void *db_conn,
  * uid IN/OUT - user ID who initiated the RPC
  * RET SLURM_SUCCESS or error code */
 extern int 
-proc_req(void **db_conn, slurm_fd orig_fd, 
+proc_req(slurmdbd_conn_t *slurmdbd_conn, 
 	 char *msg, uint32_t msg_size,
 	 bool first, Buf *out_buffer, uint32_t *uid)
 {
@@ -150,163 +154,177 @@ proc_req(void **db_conn, slurm_fd orig_fd,
 		comment = "Initial RPC not DBD_INIT";
 		error("%s type (%d)", comment, msg_type);
 		rc = EINVAL;
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_INIT);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_INIT);
 	} else {
 		switch (msg_type) {
 		case DBD_ADD_ACCOUNTS:
-			rc = _add_accounts(*db_conn,
+			rc = _add_accounts(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
 		case DBD_ADD_ACCOUNT_COORDS:
-			rc = _add_account_coords(*db_conn,
+			rc = _add_account_coords(slurmdbd_conn,
 						 in_buffer, out_buffer, uid);
 			break;
 		case DBD_ADD_ASSOCS:
-			rc = _add_assocs(*db_conn, in_buffer, out_buffer, uid);
+			rc = _add_assocs(slurmdbd_conn,
+					 in_buffer, out_buffer, uid);
 			break;
 		case DBD_ADD_CLUSTERS:
-			rc = _add_clusters(*db_conn,
+			rc = _add_clusters(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
 		case DBD_ADD_QOS:
-			rc = _add_qos(*db_conn, in_buffer, out_buffer, uid);
+			rc = _add_qos(slurmdbd_conn,
+				      in_buffer, out_buffer, uid);
 			break;
 		case DBD_ADD_USERS:
-			rc = _add_users(*db_conn, in_buffer, out_buffer, uid);
+			rc = _add_users(slurmdbd_conn, 
+					in_buffer, out_buffer, uid);
 			break;
 		case DBD_CLUSTER_PROCS:
-			rc = _cluster_procs(*db_conn,
+			rc = _cluster_procs(slurmdbd_conn,
 					    in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_ACCOUNTS:
-			rc = _get_accounts(*db_conn, 
+			rc = _get_accounts(slurmdbd_conn, 
 					   in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_ASSOCS:
-			rc = _get_assocs(*db_conn, in_buffer, out_buffer, uid);
+			rc = _get_assocs(slurmdbd_conn,
+					 in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_ASSOC_USAGE:
 		case DBD_GET_CLUSTER_USAGE:
-			rc = _get_usage(msg_type, *db_conn,
+			rc = _get_usage(msg_type, slurmdbd_conn,
 					in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_CLUSTERS:
-			rc = _get_clusters(*db_conn,
+			rc = _get_clusters(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_JOBS:
-			rc = _get_jobs(*db_conn, in_buffer, out_buffer, uid);
+			rc = _get_jobs(slurmdbd_conn,
+				       in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_JOBS_COND:
-			rc = _get_jobs_cond(*db_conn, 
+			rc = _get_jobs_cond(slurmdbd_conn, 
 					    in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_QOS:
-			rc = _get_qos(*db_conn, in_buffer, out_buffer, uid);
+			rc = _get_qos(slurmdbd_conn,
+				      in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_TXN:
-			rc = _get_txn(*db_conn, in_buffer, out_buffer, uid);
+			rc = _get_txn(slurmdbd_conn,
+				      in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_USERS:
-			rc = _get_users(*db_conn, in_buffer, out_buffer, uid);
+			rc = _get_users(slurmdbd_conn,
+					in_buffer, out_buffer, uid);
 			break;
 		case DBD_FLUSH_JOBS:
-			rc = _flush_jobs(*db_conn, in_buffer, out_buffer, uid);
+			rc = _flush_jobs(slurmdbd_conn,
+					 in_buffer, out_buffer, uid);
 			break;
 		case DBD_INIT:
 			if (first)
-				(*db_conn) = _init_conn(
-					in_buffer, out_buffer, uid);
+				rc = _init_conn(slurmdbd_conn, 
+						in_buffer, out_buffer, uid);
 			else {
 				comment = "DBD_INIT sent after connection established";
 				error("%s", comment);
 				rc = EINVAL;
-				*out_buffer = make_dbd_rc_msg(rc, comment,
-							      DBD_INIT);
+				*out_buffer = make_dbd_rc_msg(
+					slurmdbd_conn->rpc_version, rc, comment,
+					DBD_INIT);
 			}
 			break;
 		case DBD_FINI:
-			rc = _fini_conn(db_conn, in_buffer, out_buffer);
+			rc = _fini_conn(slurmdbd_conn, in_buffer, out_buffer);
 			break;
 		case DBD_JOB_COMPLETE:
-			rc = _job_complete(*db_conn,
+			rc = _job_complete(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
 		case DBD_JOB_START:
-			rc = _job_start(*db_conn,
+			rc = _job_start(slurmdbd_conn,
 					in_buffer, out_buffer, uid);
 			break;
 		case DBD_JOB_SUSPEND:
-			rc = _job_suspend(*db_conn,
+			rc = _job_suspend(slurmdbd_conn,
 					  in_buffer, out_buffer, uid);
 			break;
 		case DBD_MODIFY_ACCOUNTS:
-			rc = _modify_accounts(*db_conn,
+			rc = _modify_accounts(slurmdbd_conn,
 					      in_buffer, out_buffer, uid);
 			break;
 		case DBD_MODIFY_ASSOCS:
-			rc = _modify_assocs(*db_conn,
+			rc = _modify_assocs(slurmdbd_conn,
 					    in_buffer, out_buffer, uid);
 			break;
 		case DBD_MODIFY_CLUSTERS:
-			rc = _modify_clusters(*db_conn,
+			rc = _modify_clusters(slurmdbd_conn,
 					      in_buffer, out_buffer, uid);
 			break;
 		case DBD_MODIFY_USERS:
-			rc = _modify_users(*db_conn,
+			rc = _modify_users(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
 		case DBD_NODE_STATE:
-			rc = _node_state(*db_conn,
+			rc = _node_state(slurmdbd_conn,
 					 in_buffer, out_buffer, uid);
 			break;
 		case DBD_REGISTER_CTLD:
-			rc = _register_ctld(*db_conn, orig_fd, in_buffer, 
+			rc = _register_ctld(slurmdbd_conn, in_buffer, 
 					    out_buffer, uid);
 			break;
 		case DBD_REMOVE_ACCOUNTS:
-			rc = _remove_accounts(*db_conn,
+			rc = _remove_accounts(slurmdbd_conn,
 					      in_buffer, out_buffer, uid);
 			break;
 		case DBD_REMOVE_ACCOUNT_COORDS:
-			rc = _remove_account_coords(*db_conn,
-						 in_buffer, out_buffer, uid);
+			rc = _remove_account_coords(slurmdbd_conn,
+						    in_buffer, out_buffer, uid);
 			break;
 		case DBD_REMOVE_ASSOCS:
-			rc = _remove_assocs(*db_conn,
+			rc = _remove_assocs(slurmdbd_conn,
 					    in_buffer, out_buffer, uid);
 			break;
 		case DBD_REMOVE_CLUSTERS:
-			rc = _remove_clusters(*db_conn,
+			rc = _remove_clusters(slurmdbd_conn,
 					      in_buffer, out_buffer, uid);
 			break;
 		case DBD_REMOVE_QOS:
-			rc = _remove_qos(*db_conn, in_buffer, out_buffer, uid);
+			rc = _remove_qos(slurmdbd_conn,
+					 in_buffer, out_buffer, uid);
 			break;
 		case DBD_REMOVE_USERS:
-			rc = _remove_users(*db_conn,
+			rc = _remove_users(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
 		case DBD_ROLL_USAGE:
-			rc = _roll_usage(*db_conn, in_buffer, out_buffer, uid);
+			rc = _roll_usage(slurmdbd_conn, 
+					 in_buffer, out_buffer, uid);
 			break;
 		case DBD_STEP_COMPLETE:
-			rc = _step_complete(*db_conn,
+			rc = _step_complete(slurmdbd_conn,
 					    in_buffer, out_buffer, uid);
 			break;
 		case DBD_STEP_START:
-			rc = _step_start(*db_conn,
+			rc = _step_start(slurmdbd_conn,
 					 in_buffer, out_buffer, uid);
 			break;
 		case DBD_UPDATE_SHARES_USED:
-			rc = _update_shares_used(*db_conn,
+			rc = _update_shares_used(slurmdbd_conn,
 						 in_buffer, out_buffer, uid);
 			break;
 		default:
 			comment = "Invalid RPC";
 			error("%s msg_type=%d", comment, msg_type);
 			rc = EINVAL;
-			*out_buffer = make_dbd_rc_msg(rc, comment, 0);
+			*out_buffer = make_dbd_rc_msg(
+				slurmdbd_conn->rpc_version, rc, comment, 0);
 			break;
 		}
 	}
@@ -320,7 +338,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-static int _add_accounts(void *db_conn,
+static int _add_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -329,12 +347,14 @@ static int _add_accounts(void *db_conn,
 
 	debug2("DBD_ADD_ACCOUNTS: called");
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
+	   < ACCT_ADMIN_OPERATOR) {
 		acct_user_rec_t user;
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(assoc_mgr_fill_in_user(db_conn, &user, 1) != SLURM_SUCCESS) {
+		if(assoc_mgr_fill_in_user(slurmdbd_conn->db_conn, &user, 1)
+		   != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
 			rc = SLURM_ERROR;
@@ -353,7 +373,8 @@ static int _add_accounts(void *db_conn,
 		 */		
 	}
 
-	if (slurmdbd_unpack_list_msg(DBD_ADD_ACCOUNTS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_list_msg(slurmdbd_conn->rpc_version, 
+				     DBD_ADD_ACCOUNTS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_ADD_ACCOUNTS message";
 		error("%s", comment);
@@ -361,20 +382,24 @@ static int _add_accounts(void *db_conn,
 		goto end_it;
 	}
 	
-	rc = acct_storage_g_add_accounts(db_conn, *uid, get_msg->my_list);
+	rc = acct_storage_g_add_accounts(slurmdbd_conn->db_conn, *uid,
+					 get_msg->my_list);
 end_it:
-	slurmdbd_free_list_msg(get_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_ACCOUNTS);
+	slurmdbd_free_list_msg(slurmdbd_conn->rpc_version, 
+			       get_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_ADD_ACCOUNTS);
 	return rc;
 }
-static int _add_account_coords(void *db_conn,
+static int _add_account_coords(slurmdbd_conn_t *slurmdbd_conn,
 			       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
 	dbd_acct_coord_msg_t *get_msg = NULL;
 	char *comment = NULL;
 	
-	if (slurmdbd_unpack_acct_coord_msg(&get_msg, in_buffer) !=
+	if (slurmdbd_unpack_acct_coord_msg(slurmdbd_conn->rpc_version, 
+					   &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_ADD_ACCOUNT_COORDS message";
 		error("%s", comment);
@@ -384,7 +409,8 @@ static int _add_account_coords(void *db_conn,
 	
 	debug2("DBD_ADD_ACCOUNT_COORDS: called");
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
+	   < ACCT_ADMIN_OPERATOR) {
 		ListIterator itr = NULL;
 		ListIterator itr2 = NULL;
 		acct_user_rec_t user;
@@ -394,7 +420,8 @@ static int _add_account_coords(void *db_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(assoc_mgr_fill_in_user(db_conn, &user, 1) != SLURM_SUCCESS) {
+		if(assoc_mgr_fill_in_user(slurmdbd_conn->db_conn, &user, 1) 
+		   != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
 			rc = SLURM_ERROR;
@@ -430,16 +457,18 @@ static int _add_account_coords(void *db_conn,
 		}
 	}
 
-	rc = acct_storage_g_add_coord(db_conn, *uid, get_msg->acct_list,
-				      get_msg->cond);
+	rc = acct_storage_g_add_coord(slurmdbd_conn->db_conn, *uid, 
+				      get_msg->acct_list, get_msg->cond);
 end_it:
-	slurmdbd_free_acct_coord_msg(get_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_ACCOUNT_COORDS);
+	slurmdbd_free_acct_coord_msg(slurmdbd_conn->rpc_version, 
+				     get_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_ADD_ACCOUNT_COORDS);
 	return rc;
 }
 
-static int _add_assocs(void *db_conn,
-			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+static int _add_assocs(slurmdbd_conn_t *slurmdbd_conn,
+		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
 	dbd_list_msg_t *get_msg = NULL;
@@ -447,7 +476,8 @@ static int _add_assocs(void *db_conn,
 
 	debug2("DBD_ADD_ASSOCS: called");
 
-	if (slurmdbd_unpack_list_msg(DBD_ADD_ASSOCS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_list_msg(slurmdbd_conn->rpc_version, 
+				     DBD_ADD_ASSOCS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_ADD_ASSOCS message";
 		error("%s", comment);
@@ -456,7 +486,8 @@ static int _add_assocs(void *db_conn,
 	}
 	
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
+	   < ACCT_ADMIN_OPERATOR) {
 		ListIterator itr = NULL;
 		ListIterator itr2 = NULL;
 		acct_user_rec_t user;
@@ -465,7 +496,8 @@ static int _add_assocs(void *db_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(assoc_mgr_fill_in_user(db_conn, &user, 1) != SLURM_SUCCESS) {
+		if(assoc_mgr_fill_in_user(slurmdbd_conn->db_conn, &user, 1)
+		   != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
 			rc = SLURM_ERROR;
@@ -503,14 +535,17 @@ static int _add_assocs(void *db_conn,
 		}
 	}
 
-	rc = acct_storage_g_add_associations(db_conn, *uid, get_msg->my_list);
+	rc = acct_storage_g_add_associations(slurmdbd_conn->db_conn, *uid,
+					     get_msg->my_list);
 end_it:
-	slurmdbd_free_list_msg(get_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_ASSOCS);
+	slurmdbd_free_list_msg(slurmdbd_conn->rpc_version, 
+			       get_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_ADD_ASSOCS);
 	return rc;
 }
 
-static int _add_clusters(void *db_conn,
+static int _add_clusters(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -519,14 +554,16 @@ static int _add_clusters(void *db_conn,
 
 	debug2("DBD_ADD_CLUSTERS: called");
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_SUPER_USER) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
+	   < ACCT_ADMIN_SUPER_USER) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
 
-	if (slurmdbd_unpack_list_msg(DBD_ADD_CLUSTERS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_list_msg(slurmdbd_conn->rpc_version, 
+				     DBD_ADD_CLUSTERS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_ADD_CLUSTERS message";
 		error("%s", comment);
@@ -534,17 +571,20 @@ static int _add_clusters(void *db_conn,
 		goto end_it;
 	}
 	
-	rc = acct_storage_g_add_clusters(db_conn, *uid, get_msg->my_list);
+	rc = acct_storage_g_add_clusters(slurmdbd_conn->db_conn, *uid, 
+					 get_msg->my_list);
 	if(rc != SLURM_SUCCESS) 
 		comment = "Failed to add cluster.";
 
 end_it:
-	slurmdbd_free_list_msg(get_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_CLUSTERS);
+	slurmdbd_free_list_msg(slurmdbd_conn->rpc_version, 
+			       get_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_ADD_CLUSTERS);
 	return rc;
 }
 
-static int _add_qos(void *db_conn,
+static int _add_qos(slurmdbd_conn_t *slurmdbd_conn,
 		    Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -553,7 +593,7 @@ static int _add_qos(void *db_conn,
 
 	debug2("DBD_ADD_QOS: called");
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && (assoc_mgr_get_admin_level(db_conn, *uid) 
+	   && (assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
 	       < ACCT_ADMIN_SUPER_USER)) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
@@ -561,7 +601,8 @@ static int _add_qos(void *db_conn,
 		goto end_it;
 	}
 
-	if (slurmdbd_unpack_list_msg(DBD_ADD_QOS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_list_msg(slurmdbd_conn->rpc_version, 
+				     DBD_ADD_QOS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_ADD_QOS message";
 		error("%s", comment);
@@ -569,17 +610,20 @@ static int _add_qos(void *db_conn,
 		goto end_it;
 	}
 	
-	rc = acct_storage_g_add_qos(db_conn, *uid, get_msg->my_list);
+	rc = acct_storage_g_add_qos(slurmdbd_conn->db_conn, *uid,
+				    get_msg->my_list);
 	if(rc != SLURM_SUCCESS) 
 		comment = "Failed to add qos.";
 
 end_it:
-	slurmdbd_free_list_msg(get_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_QOS);
+	slurmdbd_free_list_msg(slurmdbd_conn->rpc_version, 
+			       get_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_ADD_QOS);
 	return rc;
 }
 
-static int _add_users(void *db_conn,
+static int _add_users(slurmdbd_conn_t *slurmdbd_conn,
 		      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -587,12 +631,14 @@ static int _add_users(void *db_conn,
 	char *comment = NULL;
 	debug2("DBD_ADD_USERS: called");
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
+	   < ACCT_ADMIN_OPERATOR) {
 		acct_user_rec_t user;
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(assoc_mgr_fill_in_user(db_conn, &user, 1) != SLURM_SUCCESS) {
+		if(assoc_mgr_fill_in_user(slurmdbd_conn->db_conn, &user, 1) 
+		   != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
 			rc = SLURM_ERROR;
@@ -611,7 +657,8 @@ static int _add_users(void *db_conn,
 		 */		
 	}
 
-	if (slurmdbd_unpack_list_msg(DBD_ADD_USERS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_list_msg(slurmdbd_conn->rpc_version, 
+				     DBD_ADD_USERS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_ADD_USERS message";
 		error("%s", comment);
@@ -619,15 +666,18 @@ static int _add_users(void *db_conn,
 		goto end_it;
 	}
 	
-	rc = acct_storage_g_add_users(db_conn, *uid, get_msg->my_list);
+	rc = acct_storage_g_add_users(slurmdbd_conn->db_conn, *uid, 
+				      get_msg->my_list);
 
 end_it:
-	slurmdbd_free_list_msg(get_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_USERS);
+	slurmdbd_free_list_msg(slurmdbd_conn->rpc_version, 
+			       get_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_ADD_USERS);
 	return rc;
 }
 
-static int _cluster_procs(void *db_conn,
+static int _cluster_procs(slurmdbd_conn_t *slurmdbd_conn,
 			  Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cluster_procs_msg_t *cluster_procs_msg = NULL;
@@ -640,7 +690,8 @@ static int _cluster_procs(void *db_conn,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_cluster_procs_msg(&cluster_procs_msg, in_buffer) !=
+	if (slurmdbd_unpack_cluster_procs_msg(slurmdbd_conn->rpc_version, 
+					      &cluster_procs_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_CLUSTER_PROCS message";
 		error("%s", comment);
@@ -652,17 +703,19 @@ static int _cluster_procs(void *db_conn,
 	       cluster_procs_msg->proc_count);
 
 	rc = clusteracct_storage_g_cluster_procs(
-		db_conn,
+		slurmdbd_conn->db_conn,
 		cluster_procs_msg->cluster_name,
 		cluster_procs_msg->proc_count,
 		cluster_procs_msg->event_time);
 end_it:
-	slurmdbd_free_cluster_procs_msg(cluster_procs_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_CLUSTER_PROCS);
+	slurmdbd_free_cluster_procs_msg(slurmdbd_conn->rpc_version, 
+					cluster_procs_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_CLUSTER_PROCS);
 	return rc;
 }
 
-static int _get_accounts(void *db_conn, 
+static int _get_accounts(slurmdbd_conn_t *slurmdbd_conn, 
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cond_msg_t *get_msg = NULL;
@@ -670,30 +723,36 @@ static int _get_accounts(void *db_conn,
 	char *comment = NULL;
 
 	debug2("DBD_GET_ACCOUNTS: called");
-	if (slurmdbd_unpack_cond_msg(DBD_GET_ACCOUNTS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_GET_ACCOUNTS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_ACCOUNTS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment,
 					      DBD_GET_ACCOUNTS);
 		return SLURM_ERROR;
 	}
 	
-	list_msg.my_list = acct_storage_g_get_accounts(db_conn, *uid,
-						       get_msg->cond);
-	slurmdbd_free_cond_msg(DBD_GET_ACCOUNTS, get_msg);
+	list_msg.my_list = acct_storage_g_get_accounts(slurmdbd_conn->db_conn,
+						       *uid, get_msg->cond);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GET_ACCOUNTS, get_msg);
 
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_ACCOUNTS, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_ACCOUNTS, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_ACCOUNTS, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return SLURM_SUCCESS;
 }
 
-static int _get_assocs(void *db_conn, 
+static int _get_assocs(slurmdbd_conn_t *slurmdbd_conn, 
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cond_msg_t *get_msg = NULL;
@@ -701,30 +760,36 @@ static int _get_assocs(void *db_conn,
 	char *comment = NULL;
 
 	debug2("DBD_GET_ASSOCS: called");
-	if (slurmdbd_unpack_cond_msg(DBD_GET_ASSOCS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_GET_ASSOCS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_ASSOCS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment,
 					      DBD_GET_ASSOCS);
 		return SLURM_ERROR;
 	}
 	
 	list_msg.my_list = acct_storage_g_get_associations(
-		db_conn, *uid, get_msg->cond);
-	slurmdbd_free_cond_msg(DBD_GET_ASSOCS, get_msg);
+		slurmdbd_conn->db_conn, *uid, get_msg->cond);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GET_ASSOCS, get_msg);
 
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_ASSOCS, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_ASSOCS, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_ASSOCS, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 	
 	return SLURM_SUCCESS;
 }
 
-static int _get_clusters(void *db_conn, 
+static int _get_clusters(slurmdbd_conn_t *slurmdbd_conn, 
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cond_msg_t *get_msg = NULL;
@@ -732,30 +797,36 @@ static int _get_clusters(void *db_conn,
 	char *comment = NULL;
 
 	debug2("DBD_GET_CLUSTERS: called");
-	if (slurmdbd_unpack_cond_msg(DBD_GET_CLUSTERS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_GET_CLUSTERS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_CLUSTERS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment,
 					      DBD_GET_CLUSTERS);
 		return SLURM_ERROR;
 	}
 	
 	list_msg.my_list = acct_storage_g_get_clusters(
-		db_conn, *uid, get_msg->cond);
-	slurmdbd_free_cond_msg(DBD_GET_CLUSTERS, get_msg);
+		slurmdbd_conn->db_conn, *uid, get_msg->cond);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GET_CLUSTERS, get_msg);
 
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_CLUSTERS, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_CLUSTERS, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_CLUSTERS, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 	
 	return SLURM_SUCCESS;
 }
 
-static int _get_jobs(void *db_conn, 
+static int _get_jobs(slurmdbd_conn_t *slurmdbd_conn, 
 		     Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_get_jobs_msg_t *get_jobs_msg = NULL;
@@ -764,11 +835,13 @@ static int _get_jobs(void *db_conn,
 	char *comment = NULL;
 
 	debug2("DBD_GET_JOBS: called");
-	if (slurmdbd_unpack_get_jobs_msg(&get_jobs_msg, in_buffer) !=
+	if (slurmdbd_unpack_get_jobs_msg(slurmdbd_conn->rpc_version, 
+					 &get_jobs_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_JOBS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, 
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment, 
 					      DBD_GET_JOBS);
 		return SLURM_ERROR;
 	}
@@ -788,24 +861,29 @@ static int _get_jobs(void *db_conn,
 	}
 		
 	list_msg.my_list = jobacct_storage_g_get_jobs(
-		db_conn, *uid,
+		slurmdbd_conn->db_conn, *uid,
 		get_jobs_msg->selected_steps, get_jobs_msg->selected_parts,
 		&sacct_params);
-	slurmdbd_free_get_jobs_msg(get_jobs_msg);
+	slurmdbd_free_get_jobs_msg(slurmdbd_conn->rpc_version, 
+				   get_jobs_msg);
+
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
 
 	if(sacct_params.opt_cluster_list)
 		list_destroy(sacct_params.opt_cluster_list);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_JOBS, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_JOBS, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_JOBS, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 	
 	return SLURM_SUCCESS;
 }
 
-static int _get_jobs_cond(void *db_conn, 
+static int _get_jobs_cond(slurmdbd_conn_t *slurmdbd_conn, 
 			  Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cond_msg_t *cond_msg = NULL;
@@ -813,59 +891,74 @@ static int _get_jobs_cond(void *db_conn,
 	char *comment = NULL;
 
 	debug2("DBD_GET_JOBS_COND: called");
-	if (slurmdbd_unpack_cond_msg(DBD_GET_JOBS_COND, &cond_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_GET_JOBS_COND, &cond_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_JOBS_COND message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, 
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment, 
 					      DBD_GET_JOBS_COND);
 		return SLURM_ERROR;
 	}
 	
 	list_msg.my_list = jobacct_storage_g_get_jobs_cond(
-		db_conn, *uid, cond_msg->cond);
-	slurmdbd_free_cond_msg(DBD_GET_JOBS_COND, cond_msg);
+		slurmdbd_conn->db_conn, *uid, cond_msg->cond);
+
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GET_JOBS_COND, cond_msg);
+
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_JOBS, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_JOBS, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_JOBS, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 	
 	return SLURM_SUCCESS;
 }
 
-static int _get_qos(void *db_conn, 
-			  Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+static int _get_qos(slurmdbd_conn_t *slurmdbd_conn, 
+		    Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cond_msg_t *cond_msg = NULL;
 	dbd_list_msg_t list_msg;
 	char *comment = NULL;
 
 	debug2("DBD_GET_QOS: called");
-	if (slurmdbd_unpack_cond_msg(DBD_GET_QOS, &cond_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_GET_QOS, &cond_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_QOS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, 
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment, 
 					      DBD_GET_QOS);
 		return SLURM_ERROR;
 	}
 	
-	list_msg.my_list = acct_storage_g_get_qos(db_conn, *uid,
+	list_msg.my_list = acct_storage_g_get_qos(slurmdbd_conn->db_conn, *uid,
 						  cond_msg->cond);
-	slurmdbd_free_cond_msg(DBD_GET_QOS, cond_msg);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GET_QOS, cond_msg);
+
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_QOS, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_QOS, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_QOS, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 	
 	return SLURM_SUCCESS;
 }
 
-static int _get_txn(void *db_conn, 
+static int _get_txn(slurmdbd_conn_t *slurmdbd_conn, 
 		    Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cond_msg_t *cond_msg = NULL;
@@ -873,29 +966,36 @@ static int _get_txn(void *db_conn,
 	char *comment = NULL;
 
 	debug2("DBD_GET_TXN: called");
-	if (slurmdbd_unpack_cond_msg(DBD_GET_TXN, &cond_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_GET_TXN, &cond_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_TXN message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, 
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment, 
 					      DBD_GET_TXN);
 		return SLURM_ERROR;
 	}
 
-	list_msg.my_list = acct_storage_g_get_txn(db_conn, *uid,
+	list_msg.my_list = acct_storage_g_get_txn(slurmdbd_conn->db_conn, *uid,
 						  cond_msg->cond);
-	slurmdbd_free_cond_msg(DBD_GET_TXN, cond_msg);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GET_TXN, cond_msg);
+
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_TXN, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_TXN, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_TXN, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 	
 	return SLURM_SUCCESS;
 }
 
-static int _get_usage(uint16_t type, void *db_conn,
+static int _get_usage(uint16_t type, slurmdbd_conn_t *slurmdbd_conn,
 		      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_usage_msg_t *get_msg = NULL;
@@ -908,11 +1008,13 @@ static int _get_usage(uint16_t type, void *db_conn,
 
 	info("DBD_GET_USAGE: called");
 
-	if (slurmdbd_unpack_usage_msg(type, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_usage_msg(slurmdbd_conn->rpc_version, 
+				      type, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_USAGE message"; 
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, type);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment, type);
 		return SLURM_ERROR;
 	}
 	switch(type) {
@@ -927,19 +1029,22 @@ static int _get_usage(uint16_t type, void *db_conn,
 	default:
 		comment = "Unknown type of usage to get";
 		error("%s %u", comment, type);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, type);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment, type);
 		return SLURM_ERROR;
 	}		
 
-	rc = (*(my_function))(db_conn, *uid, get_msg->rec,
+	rc = (*(my_function))(slurmdbd_conn->db_conn, *uid, get_msg->rec,
 			      get_msg->start, get_msg->end);
-	slurmdbd_free_usage_msg(type, get_msg);
+	slurmdbd_free_usage_msg(slurmdbd_conn->rpc_version, 
+				type, get_msg);
 
 	if(rc != SLURM_SUCCESS) {
 		comment = "Problem getting usage info";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, type);
-		return SLURM_ERROR;
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, type);
+		return rc;
 		
 	}
 	memset(&got_msg, 0, sizeof(dbd_usage_msg_t));
@@ -947,12 +1052,13 @@ static int _get_usage(uint16_t type, void *db_conn,
 	get_msg->rec = NULL;
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) ret_type, *out_buffer);
-	slurmdbd_pack_usage_msg(ret_type, &got_msg, *out_buffer);
+	slurmdbd_pack_usage_msg(slurmdbd_conn->rpc_version, 
+				ret_type, &got_msg, *out_buffer);
 	
 	return SLURM_SUCCESS;
 }
 
-static int _get_users(void *db_conn, 
+static int _get_users(slurmdbd_conn_t *slurmdbd_conn, 
 		      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cond_msg_t *get_msg = NULL;
@@ -961,30 +1067,37 @@ static int _get_users(void *db_conn,
 
 	debug2("DBD_GET_USERS: called");
 
-	if (slurmdbd_unpack_cond_msg(DBD_GET_USERS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_GET_USERS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_GET_USERS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment,
 					      DBD_GET_USERS);
 		return SLURM_ERROR;
 	}
 	
-	list_msg.my_list = acct_storage_g_get_users(db_conn, *uid,
-						    get_msg->cond);
-	slurmdbd_free_cond_msg(DBD_GET_USERS, get_msg);
+	list_msg.my_list = acct_storage_g_get_users(slurmdbd_conn->db_conn,
+						    *uid, get_msg->cond);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GET_USERS, get_msg);
+
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_USERS, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_USERS, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_USERS, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 	
 	return SLURM_SUCCESS;
 }
 
-static int _flush_jobs(void *db_conn,
-			  Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+static int _flush_jobs(slurmdbd_conn_t *slurmdbd_conn,
+		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_cluster_procs_msg_t *cluster_procs_msg = NULL;
 	int rc = SLURM_SUCCESS;
@@ -996,7 +1109,8 @@ static int _flush_jobs(void *db_conn,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_cluster_procs_msg(&cluster_procs_msg, in_buffer) !=
+	if (slurmdbd_unpack_cluster_procs_msg(slurmdbd_conn->rpc_version, 
+					      &cluster_procs_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_FLUSH_JOBS message";
 		error("%s", comment);
@@ -1007,28 +1121,30 @@ static int _flush_jobs(void *db_conn,
 	       cluster_procs_msg->cluster_name);
 
 	rc = acct_storage_g_flush_jobs_on_cluster(
-		db_conn,
+		slurmdbd_conn->db_conn,
 		cluster_procs_msg->cluster_name,
 		cluster_procs_msg->event_time);
 end_it:
-	slurmdbd_free_cluster_procs_msg(cluster_procs_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_FLUSH_JOBS);
+	slurmdbd_free_cluster_procs_msg(slurmdbd_conn->rpc_version, 
+					cluster_procs_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_FLUSH_JOBS);
 	return rc;
 }
 
-static void *_init_conn(Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+static int _init_conn(slurmdbd_conn_t *slurmdbd_conn,
+		      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_init_msg_t *init_msg = NULL;
 	char *comment = NULL;
 	int rc = SLURM_SUCCESS;
-	void *new_conn = NULL;
 
-	if (slurmdbd_unpack_init_msg(&init_msg, in_buffer, 
-				     slurmdbd_conf->auth_info)
+	if ((rc = slurmdbd_unpack_init_msg(slurmdbd_conn->rpc_version, 
+				     &init_msg, in_buffer, 
+					   slurmdbd_conf->auth_info))
 	    != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_INIT message";
 		error("%s", comment);
-		rc = SLURM_ERROR;
 		goto end_it;
 	}
 	if ((init_msg->version < SLURMDBD_VERSION_MIN) ||
@@ -1038,27 +1154,36 @@ static void *_init_conn(Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 		      "(%u not between %d and %d)",
 		      init_msg->version, 
 		      SLURMDBD_VERSION_MIN, SLURMDBD_VERSION);
+		rc = SLURM_PROTOCOL_VERSION_ERROR;
 		goto end_it;
 	}
 	*uid = init_msg->uid;
 	
-	debug("DBD_INIT: VERSION:%u UID:%u", init_msg->version, init_msg->uid);
-	new_conn = acct_storage_g_get_connection(false, init_msg->rollback);
-	
+	debug("DBD_INIT: VERSION:%u UID:%u IP:%s CONN:%u",
+	      init_msg->version, init_msg->uid, 
+	      slurmdbd_conn->ip, slurmdbd_conn->newsockfd);
+	slurmdbd_conn->db_conn = acct_storage_g_get_connection(
+		false, slurmdbd_conn->newsockfd, init_msg->rollback);
+	slurmdbd_conn->rpc_version = init_msg->version;
+
 end_it:
-	slurmdbd_free_init_msg(init_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_INIT);
+	slurmdbd_free_init_msg(slurmdbd_conn->rpc_version, 
+			       init_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_INIT);
 
-	return new_conn;
+	return rc;
 }
 
-static int   _fini_conn(void **db_conn, Buf in_buffer, Buf *out_buffer)
+static int   _fini_conn(slurmdbd_conn_t *slurmdbd_conn, Buf in_buffer,
+			Buf *out_buffer)
 {
 	dbd_fini_msg_t *fini_msg = NULL;
 	char *comment = NULL;
 	int rc = SLURM_SUCCESS;
 
-	if (slurmdbd_unpack_fini_msg(&fini_msg, in_buffer) != SLURM_SUCCESS) {
+	if (slurmdbd_unpack_fini_msg(slurmdbd_conn->rpc_version, 
+				     &fini_msg, in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_FINI message";
 		error("%s", comment);
 		rc = SLURM_ERROR;
@@ -1068,18 +1193,21 @@ static int   _fini_conn(void **db_conn, Buf in_buffer, Buf *out_buffer)
 	debug2("DBD_FINI: CLOSE:%u COMMIT:%u",
 	       fini_msg->close_conn, fini_msg->commit);
 	if(fini_msg->close_conn == 1)
-		rc = acct_storage_g_close_connection(db_conn);
+		rc = acct_storage_g_close_connection(&slurmdbd_conn->db_conn);
 	else
-		rc = acct_storage_g_commit((*db_conn), fini_msg->commit);
+		rc = acct_storage_g_commit(slurmdbd_conn->db_conn,
+					   fini_msg->commit);
 end_it:
-	slurmdbd_free_fini_msg(fini_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_FINI);
+	slurmdbd_free_fini_msg(slurmdbd_conn->rpc_version, 
+			       fini_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_FINI);
 
 	return rc;
 
 }
 
-static int  _job_complete(void *db_conn,
+static int  _job_complete(slurmdbd_conn_t *slurmdbd_conn,
 			  Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_job_comp_msg_t *job_comp_msg = NULL;
@@ -1094,7 +1222,8 @@ static int  _job_complete(void *db_conn,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_job_complete_msg(&job_comp_msg, in_buffer) !=
+	if (slurmdbd_unpack_job_complete_msg(slurmdbd_conn->rpc_version, 
+					     &job_comp_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_JOB_COMPLETE message";
 		error("%s", comment);
@@ -1118,17 +1247,19 @@ static int  _job_complete(void *db_conn,
 	details.submit_time = job_comp_msg->submit_time;
 
 	job.details = &details;
-	rc = jobacct_storage_g_job_complete(db_conn, &job);
+	rc = jobacct_storage_g_job_complete(slurmdbd_conn->db_conn, &job);
 
 	if(rc && errno == 740) /* meaning data is already there */
 		rc = SLURM_SUCCESS;
 end_it:
-	slurmdbd_free_job_complete_msg(job_comp_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_JOB_COMPLETE);
+	slurmdbd_free_job_complete_msg(slurmdbd_conn->rpc_version, 
+				       job_comp_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_JOB_COMPLETE);
 	return SLURM_SUCCESS;
 }
 
-static int  _job_start(void *db_conn,
+static int  _job_start(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_job_start_msg_t *job_start_msg = NULL;
@@ -1140,15 +1271,18 @@ static int  _job_start(void *db_conn,
 	if (*uid != slurmdbd_conf->slurm_user_id) {
 		comment = "DBD_JOB_START message from invalid uid";
 		error("%s %u", comment, *uid);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, comment,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      ESLURM_ACCESS_DENIED, comment,
 					      DBD_JOB_START);
 		return SLURM_ERROR;
 	}
-	if (slurmdbd_unpack_job_start_msg(&job_start_msg, in_buffer) !=
+	if (slurmdbd_unpack_job_start_msg(slurmdbd_conn->rpc_version, 
+					  &job_start_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_JOB_START message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment,
 					      DBD_JOB_START);
 		return SLURM_ERROR;
 	}
@@ -1176,7 +1310,7 @@ static int  _job_start(void *db_conn,
 
 	job.details = &details;
 
-	if(job.db_index) {
+	if(job.start_time) {
 		debug2("DBD_JOB_START: START CALL ID:%u NAME:%s INX:%u", 
 		       job_start_msg->job_id, job_start_msg->name, 
 		       job.db_index);	
@@ -1184,18 +1318,20 @@ static int  _job_start(void *db_conn,
 		debug2("DBD_JOB_START: ELIGIBLE CALL ID:%u NAME:%s", 
 		       job_start_msg->job_id, job_start_msg->name);
 	}
-	job_start_rc_msg.return_code = jobacct_storage_g_job_start(db_conn,
-								   &job);
+	job_start_rc_msg.return_code = jobacct_storage_g_job_start(
+		slurmdbd_conn->db_conn, job_start_msg->cluster, &job);
 	job_start_rc_msg.db_index = job.db_index;
 
-	slurmdbd_free_job_start_msg(job_start_msg);
+	slurmdbd_free_job_start_msg(slurmdbd_conn->rpc_version, 
+				    job_start_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_JOB_START_RC, *out_buffer);
-	slurmdbd_pack_job_start_rc_msg(&job_start_rc_msg, *out_buffer);
+	slurmdbd_pack_job_start_rc_msg(slurmdbd_conn->rpc_version, 
+				       &job_start_rc_msg, *out_buffer);
 	return SLURM_SUCCESS;
 }
 
-static int  _job_suspend(void *db_conn,
+static int  _job_suspend(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_job_suspend_msg_t *job_suspend_msg = NULL;
@@ -1210,7 +1346,8 @@ static int  _job_suspend(void *db_conn,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_job_suspend_msg(&job_suspend_msg, in_buffer) !=
+	if (slurmdbd_unpack_job_suspend_msg(slurmdbd_conn->rpc_version, 
+					    &job_suspend_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_JOB_SUSPEND message";
 		error("%s", comment);
@@ -1233,17 +1370,19 @@ static int  _job_suspend(void *db_conn,
 	job.suspend_time = job_suspend_msg->suspend_time;
 
 	job.details = &details;
-	rc = jobacct_storage_g_job_suspend(db_conn, &job);
+	rc = jobacct_storage_g_job_suspend(slurmdbd_conn->db_conn, &job);
 
 	if(rc && errno == 740) /* meaning data is already there */
 		rc = SLURM_SUCCESS;
 end_it:
-	slurmdbd_free_job_suspend_msg(job_suspend_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_JOB_SUSPEND);
+	slurmdbd_free_job_suspend_msg(slurmdbd_conn->rpc_version, 
+				      job_suspend_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_JOB_SUSPEND);
 	return SLURM_SUCCESS;
 }
 
-static int   _modify_accounts(void *db_conn,
+static int   _modify_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -1253,27 +1392,32 @@ static int   _modify_accounts(void *db_conn,
 
 	debug2("DBD_MODIFY_ACCOUNTS: called");
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
+	   < ACCT_ADMIN_OPERATOR) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      ESLURM_ACCESS_DENIED,
 					      comment, DBD_MODIFY_ACCOUNTS);
 
 		return ESLURM_ACCESS_DENIED;
 	}
 
-	if (slurmdbd_unpack_modify_msg(DBD_MODIFY_ACCOUNTS, &get_msg,
+	if (slurmdbd_unpack_modify_msg(slurmdbd_conn->rpc_version, 
+				       DBD_MODIFY_ACCOUNTS, &get_msg,
 				       in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_MODIFY_ACCOUNTS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_MODIFY_ACCOUNTS);
 		return SLURM_ERROR;
 	}
 	
 
 	if(!(list_msg.my_list = acct_storage_g_modify_accounts(
-		     db_conn, *uid, get_msg->cond, get_msg->rec))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->cond,
+		     get_msg->rec))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1288,21 +1432,25 @@ static int   _modify_accounts(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_modify_msg(DBD_MODIFY_ACCOUNTS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_ACCOUNTS);
+		slurmdbd_free_modify_msg(slurmdbd_conn->rpc_version, 
+					 DBD_MODIFY_ACCOUNTS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_MODIFY_ACCOUNTS);
 		return rc;		
 	}
-	slurmdbd_free_modify_msg(DBD_MODIFY_ACCOUNTS, get_msg);
+	slurmdbd_free_modify_msg(slurmdbd_conn->rpc_version, 
+				 DBD_MODIFY_ACCOUNTS, get_msg);
 
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 	return rc;
 }
 
-static int   _modify_assocs(void *db_conn,
+static int   _modify_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -1312,11 +1460,13 @@ static int   _modify_assocs(void *db_conn,
 
 	debug2("DBD_MODIFY_ASSOCS: called");
 
-	if (slurmdbd_unpack_modify_msg(DBD_MODIFY_ASSOCS, &get_msg, 
+	if (slurmdbd_unpack_modify_msg(slurmdbd_conn->rpc_version, 
+				       DBD_MODIFY_ASSOCS, &get_msg, 
 				       in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_MODIFY_ASSOCS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_MODIFY_ASSOCS);
 		return SLURM_ERROR;
 	}
@@ -1328,7 +1478,8 @@ static int   _modify_assocs(void *db_conn,
 	 */
 
 	if(!(list_msg.my_list = acct_storage_g_modify_associations(
-		     db_conn, *uid, get_msg->cond, get_msg->rec))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->cond,
+		     get_msg->rec))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1343,22 +1494,26 @@ static int   _modify_assocs(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_modify_msg(DBD_MODIFY_ASSOCS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_ASSOCS);
+		slurmdbd_free_modify_msg(slurmdbd_conn->rpc_version, 
+					 DBD_MODIFY_ASSOCS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_MODIFY_ASSOCS);
 		return rc;
 	}
 
-	slurmdbd_free_modify_msg(DBD_MODIFY_ASSOCS, get_msg);
+	slurmdbd_free_modify_msg(slurmdbd_conn->rpc_version, 
+				 DBD_MODIFY_ASSOCS, get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
 }
 
-static int   _modify_clusters(void *db_conn,
+static int   _modify_clusters(slurmdbd_conn_t *slurmdbd_conn,
 			      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_list_msg_t list_msg;
@@ -1367,21 +1522,24 @@ static int   _modify_clusters(void *db_conn,
 	char *comment = NULL;
 
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid)
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
 	   < ACCT_ADMIN_SUPER_USER) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      ESLURM_ACCESS_DENIED,
 					      comment, DBD_MODIFY_CLUSTERS);
 
 		return ESLURM_ACCESS_DENIED;
 	}
 
-	if (slurmdbd_unpack_modify_msg(DBD_MODIFY_CLUSTERS, &get_msg,
+	if (slurmdbd_unpack_modify_msg(slurmdbd_conn->rpc_version, 
+				       DBD_MODIFY_CLUSTERS, &get_msg,
 				       in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_MODIFY_CLUSTERS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_MODIFY_CLUSTERS);
 		return SLURM_ERROR;
 	}
@@ -1389,7 +1547,8 @@ static int   _modify_clusters(void *db_conn,
 	debug2("DBD_MODIFY_CLUSTERS: called");
 
 	if(!(list_msg.my_list = acct_storage_g_modify_clusters(
-		     db_conn, *uid, get_msg->cond, get_msg->rec))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->cond,
+		     get_msg->rec))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1404,22 +1563,26 @@ static int   _modify_clusters(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_modify_msg(DBD_MODIFY_CLUSTERS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_CLUSTERS);
+		slurmdbd_free_modify_msg(slurmdbd_conn->rpc_version, 
+					 DBD_MODIFY_CLUSTERS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_MODIFY_CLUSTERS);
 		return rc;
 	}
 
-	slurmdbd_free_modify_msg(DBD_MODIFY_CLUSTERS, get_msg);
+	slurmdbd_free_modify_msg(slurmdbd_conn->rpc_version, 
+				 DBD_MODIFY_CLUSTERS, get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
 }
 
-static int   _modify_users(void *db_conn,
+static int   _modify_users(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_list_msg_t list_msg;
@@ -1427,17 +1590,20 @@ static int   _modify_users(void *db_conn,
 	dbd_modify_msg_t *get_msg = NULL;
 	char *comment = NULL;
 	int same_user = 0;
-	int admin_level = assoc_mgr_get_admin_level(db_conn, *uid);
+	int admin_level = assoc_mgr_get_admin_level(slurmdbd_conn->db_conn,
+						    *uid);
 	acct_user_cond_t *user_cond = NULL;
 	acct_user_rec_t *user_rec = NULL;
 		
 	debug2("DBD_MODIFY_USERS: called");
 
-	if (slurmdbd_unpack_modify_msg(DBD_MODIFY_USERS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_modify_msg(slurmdbd_conn->rpc_version, 
+				       DBD_MODIFY_USERS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_MODIFY_USERS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_MODIFY_USERS);
 		return SLURM_ERROR;
 	}
@@ -1459,7 +1625,8 @@ static int   _modify_users(void *db_conn,
 		}
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      ESLURM_ACCESS_DENIED,
 					      comment, DBD_MODIFY_USERS);
 
 		return ESLURM_ACCESS_DENIED;
@@ -1472,12 +1639,13 @@ is_same_user:
 		/* If we add anything else here for the user we will
 		 * need to document it
 		 */
-		if((user_rec->admin_level != ACCT_ADMIN_NOTSET)
-		   || (user_rec->qos_list)) {
+		if((user_rec->admin_level != ACCT_ADMIN_NOTSET)) {
 			comment = "You can only change your own default account, nothing else";
 			error("%s", comment);
-			*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
-						      comment, DBD_MODIFY_USERS);
+			*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+						      ESLURM_ACCESS_DENIED,
+						      comment,
+						      DBD_MODIFY_USERS);
 			
 			return ESLURM_ACCESS_DENIED;	
 		}		
@@ -1491,7 +1659,7 @@ is_same_user:
 	}
 
 	if(!(list_msg.my_list = acct_storage_g_modify_users(
-		     db_conn, *uid, user_cond, user_rec))) {
+		     slurmdbd_conn->db_conn, *uid, user_cond, user_rec))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1506,22 +1674,26 @@ is_same_user:
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_modify_msg(DBD_MODIFY_USERS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_USERS);
+		slurmdbd_free_modify_msg(slurmdbd_conn->rpc_version, 
+					 DBD_MODIFY_USERS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_MODIFY_USERS);
 		return rc;
 	}
 
-	slurmdbd_free_modify_msg(DBD_MODIFY_USERS, get_msg);
+	slurmdbd_free_modify_msg(slurmdbd_conn->rpc_version, 
+				 DBD_MODIFY_USERS, get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
 }
 
-static int _node_state(void *db_conn,
+static int _node_state(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_node_state_msg_t *node_state_msg = NULL;
@@ -1537,7 +1709,8 @@ static int _node_state(void *db_conn,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_node_state_msg(&node_state_msg, in_buffer) !=
+	if (slurmdbd_unpack_node_state_msg(slurmdbd_conn->rpc_version, 
+					   &node_state_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_NODE_STATE message";
 		error("%s", comment);
@@ -1545,11 +1718,18 @@ static int _node_state(void *db_conn,
 		goto end_it;
 	}
 
-	debug2("DBD_NODE_STATE: NODE:%s STATE:%s REASON:%s TIME:%u", 
-	       node_state_msg->hostlist,
-	       _node_state_string(node_state_msg->new_state),
-	       node_state_msg->reason, 
-	       node_state_msg->event_time);
+	if(node_state_msg->new_state == DBD_NODE_STATE_UP)
+		debug3("DBD_NODE_STATE: NODE:%s STATE:%s REASON:%s TIME:%u", 
+		       node_state_msg->hostlist,
+		       _node_state_string(node_state_msg->new_state),
+		       node_state_msg->reason, 
+		       node_state_msg->event_time);
+	else
+		debug2("DBD_NODE_STATE: NODE:%s STATE:%s REASON:%s TIME:%u", 
+		       node_state_msg->hostlist,
+		       _node_state_string(node_state_msg->new_state),
+		       node_state_msg->reason, 
+		       node_state_msg->event_time);
 	node_ptr.name = node_state_msg->hostlist;
 	node_ptr.cpus = node_state_msg->cpu_count;
 
@@ -1557,13 +1737,13 @@ static int _node_state(void *db_conn,
 
 	if(node_state_msg->new_state == DBD_NODE_STATE_DOWN)
 		rc = clusteracct_storage_g_node_down(
-			db_conn,
+			slurmdbd_conn->db_conn,
 			node_state_msg->cluster_name,
 			&node_ptr,
 			node_state_msg->event_time,
 			node_state_msg->reason);
 	else
-		rc = clusteracct_storage_g_node_up(db_conn,
+		rc = clusteracct_storage_g_node_up(slurmdbd_conn->db_conn,
 						   node_state_msg->cluster_name,
 						   &node_ptr,
 						   node_state_msg->event_time);
@@ -1572,30 +1752,30 @@ static int _node_state(void *db_conn,
 		rc = SLURM_SUCCESS;
 
 end_it:
-	slurmdbd_free_node_state_msg(node_state_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_NODE_STATE);
+	slurmdbd_free_node_state_msg(slurmdbd_conn->rpc_version, 
+				     node_state_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_NODE_STATE);
 	return SLURM_SUCCESS;
 }
 
 static char *_node_state_string(uint16_t node_state)
 {
 	switch(node_state) {
-		case DBD_NODE_STATE_DOWN:
-			return "DOWN";
-		case DBD_NODE_STATE_UP:
-			return "UP";
+	case DBD_NODE_STATE_DOWN:
+		return "DOWN";
+	case DBD_NODE_STATE_UP:
+		return "UP";
 	}
 	return "UNKNOWN";
 }
 
-static int   _register_ctld(void *db_conn, slurm_fd orig_fd,
+static int   _register_ctld(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_register_ctld_msg_t *register_ctld_msg = NULL;
 	int rc = SLURM_SUCCESS;
-	char *comment = NULL, ip[32];
-	slurm_addr ctld_address;
-	uint16_t orig_port;
+	char *comment = NULL;
 	acct_cluster_cond_t cluster_q;
 	acct_cluster_rec_t cluster;
 	dbd_list_msg_t list_msg;
@@ -1606,7 +1786,8 @@ static int   _register_ctld(void *db_conn, slurm_fd orig_fd,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_register_ctld_msg(&register_ctld_msg, in_buffer) !=
+	if (slurmdbd_unpack_register_ctld_msg(slurmdbd_conn->rpc_version, 
+					      &register_ctld_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REGISTER_CTLD message";
 		error("%s", comment);
@@ -1615,20 +1796,24 @@ static int   _register_ctld(void *db_conn, slurm_fd orig_fd,
 	}
 	debug2("DBD_REGISTER_CTLD: called for %s(%u)",
 	       register_ctld_msg->cluster_name, register_ctld_msg->port);
-	slurm_get_peer_addr(orig_fd, &ctld_address);
-	slurm_get_ip_str(&ctld_address, &orig_port, ip, sizeof(ip));
-	debug2("slurmctld at ip:%s, port:%d", ip, register_ctld_msg->port);
+
+	debug2("slurmctld at ip:%s, port:%d", slurmdbd_conn->ip,
+	       register_ctld_msg->port);
 
 	memset(&cluster_q, 0, sizeof(acct_cluster_cond_t));
 	memset(&cluster, 0, sizeof(acct_cluster_rec_t));
 	cluster_q.cluster_list = list_create(NULL);
 	list_append(cluster_q.cluster_list, register_ctld_msg->cluster_name);
-	cluster.control_host = ip;
+	cluster.control_host = slurmdbd_conn->ip;
 	cluster.control_port = register_ctld_msg->port;
-	list_msg.my_list = acct_storage_g_modify_clusters(
-		db_conn, *uid, &cluster_q, &cluster);
+	cluster.rpc_version = slurmdbd_conn->rpc_version;
 
-	if(!list_msg.my_list || !list_count(list_msg.my_list)) {
+	list_msg.my_list = acct_storage_g_modify_clusters(
+		slurmdbd_conn->db_conn, *uid, &cluster_q, &cluster);
+	if(errno == EFAULT) {
+		comment = "Request to register was incomplete";
+		rc = SLURM_ERROR;		
+	} else if(!list_msg.my_list || !list_count(list_msg.my_list)) {
 		comment = "This cluster hasn't been added to accounting yet";
 		rc = SLURM_ERROR;
 	} 
@@ -1643,33 +1828,36 @@ static int   _register_ctld(void *db_conn, slurm_fd orig_fd,
 	 * out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
 	 */
 #if 0
-{
-	/* Code to validate communications back to slurmctld */
-	slurm_fd fd;
-	slurm_set_addr_char(&ctld_address, register_ctld_msg->port, ip);
-	fd =  slurm_open_msg_conn(&ctld_address);
-	if (fd < 0) {
-		error("can not open socket back to slurmctld");
-	} else {
-		slurm_msg_t out_msg;
-		slurm_msg_t_init(&out_msg);
-		out_msg.msg_type = REQUEST_PING;
-		out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
-		slurm_send_node_msg(fd, &out_msg);
-		/* We probably need to add matching recv_msg function
-		 * for an arbitray fd or should these be fire and forget? */
-		slurm_close_stream(fd);
+	{
+		/* Code to validate communications back to slurmctld */
+		slurm_fd fd;
+		slurm_set_addr_char(&ctld_address, register_ctld_msg->port, ip);
+		fd =  slurm_open_msg_conn(&ctld_address);
+		if (fd < 0) {
+			error("can not open socket back to slurmctld");
+		} else {
+			slurm_msg_t out_msg;
+			slurm_msg_t_init(&out_msg);
+			out_msg.msg_type = REQUEST_PING;
+			out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
+			slurm_send_node_msg(slurmdbd_conn->rpc_version, 
+					    fd, &out_msg);
+			/* We probably need to add matching recv_msg function
+			 * for an arbitray fd or should these be fire and forget? */
+			slurm_close_stream(fd);
+		}
 	}
-}
 #endif
 
 end_it:
-	slurmdbd_free_register_ctld_msg(register_ctld_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REGISTER_CTLD);
+	slurmdbd_free_register_ctld_msg(slurmdbd_conn->rpc_version, 
+					register_ctld_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_REGISTER_CTLD);
 	return rc;
 }
 
-static int   _remove_accounts(void *db_conn,
+static int   _remove_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -1680,26 +1868,30 @@ static int   _remove_accounts(void *db_conn,
 	debug2("DBD_REMOVE_ACCOUNTS: called");
 
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
+	   < ACCT_ADMIN_OPERATOR) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      ESLURM_ACCESS_DENIED,
 					      comment, DBD_REMOVE_ACCOUNTS);
 
 		return ESLURM_ACCESS_DENIED;
 	}
 
-	if (slurmdbd_unpack_cond_msg(DBD_REMOVE_ACCOUNTS, &get_msg, 
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_REMOVE_ACCOUNTS, &get_msg, 
 				     in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REMOVE_ACCOUNTS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_REMOVE_ACCOUNTS);
 		return SLURM_ERROR;
 	}
 	
 	if(!(list_msg.my_list = acct_storage_g_remove_accounts(
-		     db_conn, *uid, get_msg->cond))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->cond))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1714,22 +1906,26 @@ static int   _remove_accounts(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_cond_msg(DBD_REMOVE_ACCOUNTS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_ACCOUNTS);
+		slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+				       DBD_REMOVE_ACCOUNTS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_REMOVE_ACCOUNTS);
 		return rc;
 	}
 
-	slurmdbd_free_cond_msg(DBD_REMOVE_ACCOUNTS, get_msg);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_REMOVE_ACCOUNTS, get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
 }
 
-static int   _remove_account_coords(void *db_conn,
+static int   _remove_account_coords(slurmdbd_conn_t *slurmdbd_conn,
 				    Buf in_buffer, Buf *out_buffer,
 				    uint32_t *uid)
 {
@@ -1740,11 +1936,13 @@ static int   _remove_account_coords(void *db_conn,
 
 	debug2("DBD_REMOVE_ACCOUNT_COORDS: called");
 
-	if (slurmdbd_unpack_acct_coord_msg(&get_msg, in_buffer) !=
+	if (slurmdbd_unpack_acct_coord_msg(slurmdbd_conn->rpc_version, 
+					   &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REMOVE_ACCOUNT_COORDS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment,
 					      DBD_ADD_ACCOUNT_COORDS);
 		return SLURM_ERROR;
 	}
@@ -1755,7 +1953,8 @@ static int   _remove_account_coords(void *db_conn,
 	 */
 
 	if(!(list_msg.my_list = acct_storage_g_remove_coord(
-		     db_conn, *uid, get_msg->acct_list, get_msg->cond))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->acct_list,
+		     get_msg->cond))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1770,23 +1969,27 @@ static int   _remove_account_coords(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_acct_coord_msg(get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, 
+		slurmdbd_free_acct_coord_msg(slurmdbd_conn->rpc_version, 
+					     get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, 
 					      DBD_REMOVE_ACCOUNT_COORDS);
 		return rc;
 	}
 
-	slurmdbd_free_acct_coord_msg(get_msg);
+	slurmdbd_free_acct_coord_msg(slurmdbd_conn->rpc_version, 
+				     get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
 }
 
-static int   _remove_assocs(void *db_conn,
+static int   _remove_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -1795,11 +1998,13 @@ static int   _remove_assocs(void *db_conn,
 	char *comment = NULL;
 
 	debug2("DBD_REMOVE_ASSOCS: called");
-	if (slurmdbd_unpack_cond_msg(DBD_REMOVE_ASSOCS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_REMOVE_ASSOCS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REMOVE_ASSOCS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_REMOVE_ASSOCS);
 		return SLURM_ERROR;
 	}
@@ -1810,7 +2015,7 @@ static int   _remove_assocs(void *db_conn,
 	 */
 
 	if(!(list_msg.my_list = acct_storage_g_remove_associations(
-		     db_conn, *uid, get_msg->cond))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->cond))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1825,15 +2030,19 @@ static int   _remove_assocs(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_cond_msg(DBD_REMOVE_ASSOCS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_ASSOCS);
+		slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+				       DBD_REMOVE_ASSOCS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_REMOVE_ASSOCS);
 		return rc;
 	}
 	
-	slurmdbd_free_cond_msg(DBD_REMOVE_ASSOCS, get_msg);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_REMOVE_ASSOCS, get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
@@ -1841,7 +2050,7 @@ static int   _remove_assocs(void *db_conn,
 
 }
 
-static int   _remove_clusters(void *db_conn,
+static int   _remove_clusters(slurmdbd_conn_t *slurmdbd_conn,
 			      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -1852,27 +2061,30 @@ static int   _remove_clusters(void *db_conn,
 	debug2("DBD_REMOVE_CLUSTERS: called");
 
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) 
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
 	   < ACCT_ADMIN_SUPER_USER) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      ESLURM_ACCESS_DENIED,
 					      comment, DBD_REMOVE_CLUSTERS);
 
 		return ESLURM_ACCESS_DENIED;
 	}
 
-	if (slurmdbd_unpack_cond_msg(DBD_REMOVE_CLUSTERS, &get_msg, 
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_REMOVE_CLUSTERS, &get_msg, 
 				     in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REMOVE_CLUSTERS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_REMOVE_CLUSTERS);
 		return SLURM_ERROR;
 	}
 	
 	if(!(list_msg.my_list = acct_storage_g_remove_clusters(
-		     db_conn, *uid, get_msg->cond))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->cond))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1887,23 +2099,27 @@ static int   _remove_clusters(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_cond_msg(DBD_REMOVE_CLUSTERS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_CLUSTERS);
+		slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+				       DBD_REMOVE_CLUSTERS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_REMOVE_CLUSTERS);
 		return rc;		
 	}
 
-	slurmdbd_free_cond_msg(DBD_REMOVE_CLUSTERS, get_msg);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_REMOVE_CLUSTERS, get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
 }
 
-static int   _remove_qos(void *db_conn,
-			      Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+static int   _remove_qos(slurmdbd_conn_t *slurmdbd_conn,
+			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
 	dbd_cond_msg_t *get_msg = NULL;
@@ -1913,27 +2129,30 @@ static int   _remove_qos(void *db_conn,
 	debug2("DBD_REMOVE_QOS: called");
 
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) 
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
 	   < ACCT_ADMIN_SUPER_USER) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      ESLURM_ACCESS_DENIED,
 					      comment, DBD_REMOVE_QOS);
 
 		return ESLURM_ACCESS_DENIED;
 	}
 
-	if (slurmdbd_unpack_cond_msg(DBD_REMOVE_QOS, &get_msg, 
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_REMOVE_QOS, &get_msg, 
 				     in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REMOVE_QOS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_REMOVE_QOS);
 		return SLURM_ERROR;
 	}
 	
 	if(!(list_msg.my_list = acct_storage_g_remove_qos(
-		     db_conn, *uid, get_msg->cond))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->cond))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -1948,22 +2167,26 @@ static int   _remove_qos(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_cond_msg(DBD_REMOVE_QOS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_QOS);
+		slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+				       DBD_REMOVE_QOS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_REMOVE_QOS);
 		return rc;		
 	}
 
-	slurmdbd_free_cond_msg(DBD_REMOVE_QOS, get_msg);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_REMOVE_QOS, get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
 }
 
-static int   _remove_users(void *db_conn,
+static int   _remove_users(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -1974,26 +2197,30 @@ static int   _remove_users(void *db_conn,
 	debug2("DBD_REMOVE_USERS: called");
 
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
+	   < ACCT_ADMIN_OPERATOR) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      ESLURM_ACCESS_DENIED,
 					      comment, DBD_REMOVE_USERS);
 
 		return ESLURM_ACCESS_DENIED;
 	}
 
-	if (slurmdbd_unpack_cond_msg(DBD_REMOVE_USERS, &get_msg, in_buffer) !=
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_REMOVE_USERS, &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_REMOVE_USERS message";
 		error("%s", comment);
-		*out_buffer = make_dbd_rc_msg(SLURM_ERROR,
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR,
 					      comment, DBD_REMOVE_USERS);
 		return SLURM_ERROR;
 	}
 	
 	if(!(list_msg.my_list = acct_storage_g_remove_users(
-		     db_conn, *uid, get_msg->cond))) {
+		     slurmdbd_conn->db_conn, *uid, get_msg->cond))) {
 		if(errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to preform this action";
 			rc = ESLURM_ACCESS_DENIED;
@@ -2008,22 +2235,26 @@ static int   _remove_users(void *db_conn,
 			rc = SLURM_ERROR;
 		}
 		error("%s", comment);
-		slurmdbd_free_cond_msg(DBD_REMOVE_USERS, get_msg);
-		*out_buffer = make_dbd_rc_msg(rc, comment, DBD_REMOVE_USERS);
+		slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+				       DBD_REMOVE_USERS, get_msg);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      rc, comment, DBD_REMOVE_USERS);
 		return rc;
 	}
 
-	slurmdbd_free_cond_msg(DBD_REMOVE_USERS, get_msg);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_REMOVE_USERS, get_msg);
 	*out_buffer = init_buf(1024);
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
-	slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_LIST, &list_msg, *out_buffer);
 	if(list_msg.my_list)
 		list_destroy(list_msg.my_list);
 
 	return rc;
 }
 
-static int   _roll_usage(void *db_conn,
+static int   _roll_usage(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_roll_usage_msg_t *get_msg = NULL;
@@ -2033,14 +2264,16 @@ static int   _roll_usage(void *db_conn,
 	info("DBD_ROLL_USAGE: called");
 
 	if((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	   && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) {
+	   && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid) 
+	   < ACCT_ADMIN_OPERATOR) {
 		comment = "Your user doesn't have privilege to preform this action";
 		error("%s", comment);
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
 
-	if (slurmdbd_unpack_roll_usage_msg(&get_msg, in_buffer) !=
+	if (slurmdbd_unpack_roll_usage_msg(slurmdbd_conn->rpc_version, 
+					   &get_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_ROLL_USAGE message"; 
 		error("%s", comment);
@@ -2048,15 +2281,17 @@ static int   _roll_usage(void *db_conn,
 		goto end_it;
 	}
 
-	rc = acct_storage_g_roll_usage(db_conn, get_msg->start);
+	rc = acct_storage_g_roll_usage(slurmdbd_conn->db_conn, get_msg->start);
 
 end_it:
-	slurmdbd_free_roll_usage_msg(get_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_ROLL_USAGE);
+	slurmdbd_free_roll_usage_msg(slurmdbd_conn->rpc_version, 
+				     get_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_ROLL_USAGE);
 	return rc;
 }
 
-static int  _step_complete(void *db_conn,
+static int  _step_complete(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_step_comp_msg_t *step_comp_msg = NULL;
@@ -2072,7 +2307,8 @@ static int  _step_complete(void *db_conn,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_step_complete_msg(&step_comp_msg, in_buffer) !=
+	if (slurmdbd_unpack_step_complete_msg(slurmdbd_conn->rpc_version, 
+					      &step_comp_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_STEP_COMPLETE message";
 		error("%s", comment);
@@ -2103,18 +2339,20 @@ static int  _step_complete(void *db_conn,
 	job.details = &details;
 	step.job_ptr = &job;
 
-	rc = jobacct_storage_g_step_complete(db_conn, &step);
+	rc = jobacct_storage_g_step_complete(slurmdbd_conn->db_conn, &step);
 
 	if(rc && errno == 740) /* meaning data is already there */
 		rc = SLURM_SUCCESS;
 
 end_it:
-	slurmdbd_free_step_complete_msg(step_comp_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_STEP_COMPLETE);
+	slurmdbd_free_step_complete_msg(slurmdbd_conn->rpc_version, 
+					step_comp_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_STEP_COMPLETE);
 	return rc;
 }
 
-static int  _step_start(void *db_conn,
+static int  _step_start(slurmdbd_conn_t *slurmdbd_conn,
 			Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_step_start_msg_t *step_start_msg = NULL;
@@ -2130,7 +2368,8 @@ static int  _step_start(void *db_conn,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_step_start_msg(&step_start_msg, in_buffer) !=
+	if (slurmdbd_unpack_step_start_msg(slurmdbd_conn->rpc_version, 
+					   &step_start_msg, in_buffer) !=
 	    SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_STEP_START message";
 		error("%s", comment);
@@ -2139,7 +2378,7 @@ static int  _step_start(void *db_conn,
 	}
 
 	debug2("DBD_STEP_START: ID:%u.%u NAME:%s SUBMIT:%d", 
-	     step_start_msg->job_id, step_start_msg->step_id,
+	       step_start_msg->job_id, step_start_msg->step_id,
 	       step_start_msg->name, step_start_msg->job_submit_time);
 
 	memset(&step, 0, sizeof(struct step_record));
@@ -2159,17 +2398,19 @@ static int  _step_start(void *db_conn,
 	job.details = &details;
 	step.job_ptr = &job;
 
-	rc = jobacct_storage_g_step_start(db_conn, &step);
+	rc = jobacct_storage_g_step_start(slurmdbd_conn->db_conn, &step);
 
 	if(rc && errno == 740) /* meaning data is already there */
 		rc = SLURM_SUCCESS;
 end_it:
-	slurmdbd_free_step_start_msg(step_start_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_STEP_START);
+	slurmdbd_free_step_start_msg(slurmdbd_conn->rpc_version, 
+				     step_start_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_STEP_START);
 	return rc;
 }
 
-static int  _update_shares_used(void *db_conn,
+static int  _update_shares_used(slurmdbd_conn_t *slurmdbd_conn,
 				Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	int rc = SLURM_SUCCESS;
@@ -2183,7 +2424,8 @@ static int  _update_shares_used(void *db_conn,
 		goto end_it;
 	}
 	debug2("DBD_UPDATE_SHARES_USED");
-	if (slurmdbd_unpack_list_msg(DBD_UPDATE_SHARES_USED, &used_shares_msg, 
+	if (slurmdbd_unpack_list_msg(slurmdbd_conn->rpc_version, 
+				     DBD_UPDATE_SHARES_USED, &used_shares_msg, 
 				     in_buffer) != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_UPDATE_SHARES_USED message";
 		error("%s", comment);
@@ -2204,11 +2446,13 @@ static int  _update_shares_used(void *db_conn,
 #endif
 	}
 
-	rc = acct_storage_g_update_shares_used(db_conn, 
+	rc = acct_storage_g_update_shares_used(slurmdbd_conn->db_conn, 
 					       used_shares_msg->my_list);
 
 end_it:
-	slurmdbd_free_list_msg(used_shares_msg);
-	*out_buffer = make_dbd_rc_msg(rc, comment, DBD_UPDATE_SHARES_USED);
+	slurmdbd_free_list_msg(slurmdbd_conn->rpc_version, 
+			       used_shares_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_UPDATE_SHARES_USED);
 	return rc;
 }
diff --git a/src/slurmdbd/proc_req.h b/src/slurmdbd/proc_req.h
index 6ed7cc191..faca952d0 100644
--- a/src/slurmdbd/proc_req.h
+++ b/src/slurmdbd/proc_req.h
@@ -42,15 +42,24 @@
 #include "src/common/pack.h"
 #include "src/common/slurm_protocol_defs.h"
 
+typedef struct {
+	void *db_conn; /* database connection */
+	char ip[32];
+	slurm_fd newsockfd; /* socket connection descriptor */
+	uint16_t orig_port;
+	uint16_t rpc_version; /* version of rpc */
+} slurmdbd_conn_t;
+
 /* Process an incoming RPC
- * orig_fd IN - originating file descriptor of the RPC
+ * slurmdbd_conn IN/OUT - connection with newsockfd set before calling;
+ *       db_conn and rpc_version are filled in during initialization.
  * msg IN - incoming message
  * msg_size IN - size of msg in bytes
  * first IN - set if first message received on the socket
  * buffer OUT - outgoing response, must be freed by caller
  * uid IN/OUT - user ID who initiated the RPC
  * RET SLURM_SUCCESS or error code */
-extern int proc_req(void **db_conn, slurm_fd orig_fd, char *msg, 
+extern int proc_req(slurmdbd_conn_t *slurmdbd_conn, char *msg, 
 		    uint32_t msg_size, bool first, Buf *out_buffer, 
 		    uint32_t *uid);
 
diff --git a/src/slurmdbd/rpc_mgr.c b/src/slurmdbd/rpc_mgr.c
index e58f503cc..617517058 100644
--- a/src/slurmdbd/rpc_mgr.c
+++ b/src/slurmdbd/rpc_mgr.c
@@ -77,10 +77,6 @@ static int             thread_count = 0;
 static pthread_mutex_t thread_count_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t  thread_count_cond = PTHREAD_COND_INITIALIZER;
 
-typedef struct connection_arg {
-	slurm_fd newsockfd;
-} connection_arg_t;
-
 
 /* Process incoming RPCs. Meant to execute as a pthread */
 extern void *rpc_mgr(void *no_data)
@@ -89,7 +85,7 @@ extern void *rpc_mgr(void *no_data)
 	slurm_fd sockfd, newsockfd;
 	int i, retry_cnt, sigarray[] = {SIGUSR1, 0};
 	slurm_addr cli_addr;
-	connection_arg_t *conn_arg = NULL;
+	slurmdbd_conn_t *conn_arg = NULL;
 
 	slurm_mutex_lock(&thread_count_lock);
 	master_thread_id = pthread_self();
@@ -134,8 +130,11 @@ extern void *rpc_mgr(void *no_data)
 			continue;
 		}
 		fd_set_nonblocking(newsockfd);
-		conn_arg = xmalloc(sizeof(connection_arg_t));
+	
+		conn_arg = xmalloc(sizeof(slurmdbd_conn_t));
 		conn_arg->newsockfd = newsockfd;
+		slurm_get_ip_str(&cli_addr, &conn_arg->orig_port,
+				 conn_arg->ip, sizeof(conn_arg->ip));
 		retry_cnt = 0;
 		while (pthread_create(&slave_thread_id[i],
 				      &thread_attr_rpc_req,
@@ -178,16 +177,16 @@ extern void rpc_mgr_wake(void)
 
 static void * _service_connection(void *arg)
 {
-	connection_arg_t *conn = (connection_arg_t *) arg;
+	slurmdbd_conn_t *conn = (slurmdbd_conn_t *) arg;
 	uint32_t nw_size, msg_size, uid;
 	char *msg = NULL;
 	ssize_t msg_read, offset;
 	bool fini = false, first = true;
 	Buf buffer = NULL;
 	int rc;
-	void *db_conn = NULL;
 			 
-	debug2("Opened connection %d", conn->newsockfd);
+	debug2("Opened connection %d from %s", conn->newsockfd,
+		conn->ip);
 	while (!fini) {
 		if (!_fd_readable(conn->newsockfd))
 			break;		/* problem with this socket */
@@ -220,16 +219,21 @@ static void * _service_connection(void *arg)
 			offset += msg_read;
 		}
 		if (msg_size == offset) {
-			rc = proc_req(&db_conn, conn->newsockfd,
-				      msg, msg_size, first, &buffer, &uid);
+			rc = proc_req(
+				conn, msg, msg_size, first, &buffer, &uid);
 			first = false;
-			if (rc != SLURM_SUCCESS) {
-				error("Processing message from connection %d",
-				      conn->newsockfd);
+			if (rc == ESLURM_ACCESS_DENIED
+			    || rc == SLURM_PROTOCOL_VERSION_ERROR) {
+				fini = true;
+			} else if (rc != SLURM_SUCCESS) {
+				error("Processing last message from "
+				      "connection %d(%s) uid(%d)",
+				      conn->newsockfd, conn->ip, uid);
 				//fini = true;
 			}
 		} else {
-			buffer = make_dbd_rc_msg(SLURM_ERROR, "Bad offset", 0);
+			buffer = make_dbd_rc_msg(conn->rpc_version,
+						 SLURM_ERROR, "Bad offset", 0);
 			fini = true;
 		}
 
@@ -237,19 +241,20 @@ static void * _service_connection(void *arg)
 		xfree(msg);
 	}
 
-	acct_storage_g_close_connection(&db_conn);
+	acct_storage_g_close_connection(&conn->db_conn);
 	if (slurm_close_accepted_conn(conn->newsockfd) < 0)
-		error("close(%d): %m",  conn->newsockfd);
+		error("close(%d): %m(%s)",  conn->newsockfd, conn->ip);
 	else
 		debug2("Closed connection %d uid(%d)", conn->newsockfd, uid);
-	xfree(arg);
+	xfree(conn);
 	_free_server_thread(pthread_self());
 	return NULL;
 }
 
 /* Return a buffer containing a DBD_RC (return code) message
  * caller must free returned buffer */
-extern Buf make_dbd_rc_msg(int rc, char *comment, uint16_t sent_type)
+extern Buf make_dbd_rc_msg(uint16_t rpc_version, 
+			   int rc, char *comment, uint16_t sent_type)
 {
 	Buf buffer;
 
@@ -259,7 +264,7 @@ extern Buf make_dbd_rc_msg(int rc, char *comment, uint16_t sent_type)
 	msg.return_code  = rc;
 	msg.comment  = comment;
 	msg.sent_type  = sent_type;
-	slurmdbd_pack_rc_msg(&msg, buffer);
+	slurmdbd_pack_rc_msg(rpc_version, &msg, buffer);
 	return buffer;
 }
 
diff --git a/src/slurmdbd/rpc_mgr.h b/src/slurmdbd/rpc_mgr.h
index 2f5c881cb..b91dcca0f 100644
--- a/src/slurmdbd/rpc_mgr.h
+++ b/src/slurmdbd/rpc_mgr.h
@@ -43,7 +43,8 @@
 
 /* Return a buffer containing a DBD_RC (return code) message
  * caller must free returned buffer */
-extern Buf make_dbd_rc_msg(int rc, char *comment, uint16_t sent_type);
+extern Buf make_dbd_rc_msg(uint16_t rpc_version, 
+			   int rc, char *comment, uint16_t sent_type);
 
 /* Process incoming RPCs. Meant to execute as a pthread */
 extern void *rpc_mgr(void *no_data);
diff --git a/src/slurmdbd/slurmdbd.c b/src/slurmdbd/slurmdbd.c
index ae2a361ca..971a07efe 100644
--- a/src/slurmdbd/slurmdbd.c
+++ b/src/slurmdbd/slurmdbd.c
@@ -97,6 +97,7 @@ int main(int argc, char *argv[])
 	pthread_attr_t thread_attr;
 	char node_name[128];
 	void *db_conn = NULL;
+	assoc_init_args_t assoc_init_arg;
 
 	_init_config();
 	log_init(argv[0], log_opts, LOG_DAEMON, NULL);
@@ -137,9 +138,12 @@ int main(int argc, char *argv[])
 		fatal("pthread_create %m");
 	slurm_attr_destroy(&thread_attr);
 
-	db_conn = acct_storage_g_get_connection(false, false);
+	db_conn = acct_storage_g_get_connection(false, 0, false);
 	
-	if(assoc_mgr_init(db_conn, NULL) == SLURM_ERROR) {
+	memset(&assoc_init_arg, 0, sizeof(assoc_init_args_t));
+	assoc_init_arg.cache_level = ASSOC_MGR_CACHE_USER;
+
+	if(assoc_mgr_init(db_conn, &assoc_init_arg) == SLURM_ERROR) {
 		error("Problem getting cache of data");
 		acct_storage_g_close_connection(&db_conn);
 		goto end_it;
@@ -176,7 +180,7 @@ end_it:
 			slurmdbd_conf->pid_file);
 	}
 
-	assoc_mgr_fini();
+	assoc_mgr_fini(NULL);
 	slurm_acct_storage_fini();
 	slurm_auth_fini();
 	log_fini();
diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c
index 45ee0ffa0..77f2f7717 100644
--- a/src/smap/configure_functions.c
+++ b/src/smap/configure_functions.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  configure_functions.c - Functions related to configure mode of smap.
- *  $Id: configure_functions.c 14295 2008-06-19 23:58:28Z da $
+ *  $Id: configure_functions.c 15192 2008-09-26 16:42:37Z da $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -37,6 +37,7 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#include "src/common/xstring.h"
 #include "src/common/uid.h"
 #include "src/smap/smap.h"
 
@@ -896,15 +897,15 @@ static int _save_allocation(char *com, List allocated_blocks)
 	int len = strlen(com);
 	int i=5, j=0;
 	allocated_block_t *allocated_block = NULL;
-	char filename[20];
-	char save_string[255];
+	char filename[50];
+	char *save_string = NULL;
 	FILE *file_ptr = NULL;
 	char *conn_type = NULL;
-	char extra[20];
+	char *extra = NULL;
 
 	ListIterator results_i;		
 	
-	memset(filename,0,20);
+	memset(filename,0,50);
 	if(len>5)
 		while(i<len) {
 			
@@ -922,44 +923,37 @@ static int _save_allocation(char *com, List allocated_blocks)
 		sprintf(filename,"bluegene.conf.%ld",
 			(long int) now_time);
 	}
+
 	file_ptr = fopen(filename,"w");
+
 	if (file_ptr!=NULL) {
-		fputs ("#\n# bluegene.conf file generated by smap\n", file_ptr);
-		fputs ("# See the bluegene.conf man page for more information\n",
-			file_ptr);
-		fputs ("#\n", file_ptr);
-		fputs ("BlrtsImage="
-		       "/bgl/BlueLight/ppcfloor/bglsys/bin/rts_hw.rts\n", 
-		       file_ptr);
-		fputs ("LinuxImage="
-		       "/bgl/BlueLight/ppcfloor/bglsys/bin/zImage.elf\n", 
-		       file_ptr);
-		fputs ("MloaderImage="
-		       "/bgl/BlueLight/ppcfloor/bglsys/bin/mmcs-mloader.rts\n",
-		       file_ptr);
-		fputs ("RamDiskImage="
-		       "/bgl/BlueLight/ppcfloor/bglsys/bin/ramdisk.elf\n", 
-		       file_ptr);
-		fputs ("BridgeAPILogFile="
-		       "/var/log/slurm/bridgeapi.log\n", 
-		       file_ptr);
-		fputs ("Numpsets=8\n", file_ptr);
-		fputs ("BridgeAPIVerbose=0\n", file_ptr);
-		sprintf(save_string, "BasePartitionNodeCnt=%d\n",
-			base_part_node_cnt);
-		fputs (save_string,file_ptr);
-		sprintf(save_string, "NodeCardNodeCnt=%d\n",
-			nodecard_node_cnt);
-		fputs (save_string,file_ptr);
-		sprintf(save_string, "LayoutMode=%s\n",
-			layout_mode);
-		fputs (save_string,file_ptr);
+		xstrcat(save_string,
+			"#\n# bluegene.conf file generated by smap\n");
+		xstrcat(save_string,
+			"# See the bluegene.conf man page for more information\n");
+		xstrcat(save_string, "#\n");
+		xstrcat(save_string, "BlrtsImage="
+		       "/bgl/BlueLight/ppcfloor/bglsys/bin/rts_hw.rts\n");
+		xstrcat(save_string, "LinuxImage="
+		       "/bgl/BlueLight/ppcfloor/bglsys/bin/zImage.elf\n");
+		xstrcat(save_string, "MloaderImage="
+		       "/bgl/BlueLight/ppcfloor/bglsys/bin/mmcs-mloader.rts\n");
+		xstrcat(save_string, "RamDiskImage="
+		       "/bgl/BlueLight/ppcfloor/bglsys/bin/ramdisk.elf\n");
+		xstrcat(save_string, "BridgeAPILogFile="
+		       "/var/log/slurm/bridgeapi.log\n");
+		xstrcat(save_string, "Numpsets=8\n");
+		xstrcat(save_string, "BridgeAPIVerbose=0\n");
+
+		xstrfmtcat(save_string, "BasePartitionNodeCnt=%d\n",
+			   base_part_node_cnt);
+		xstrfmtcat(save_string, "NodeCardNodeCnt=%d\n",
+			   nodecard_node_cnt);
+		xstrfmtcat(save_string, "LayoutMode=%s\n", layout_mode);
 
-		fputs("#\n# Block Layout\n#\n", file_ptr);
+		xstrfmtcat(save_string, "#\n# Block Layout\n#\n");
 		results_i = list_iterator_create(allocated_blocks);
 		while((allocated_block = list_next(results_i)) != NULL) {
-			memset(save_string,0,255);
-			memset(extra,0,20);
 			if(allocated_block->request->conn_type == SELECT_TORUS)
 				conn_type = "TORUS";
 			else if(allocated_block->request->conn_type 
@@ -967,17 +961,26 @@ static int _save_allocation(char *com, List allocated_blocks)
 				conn_type = "MESH";
 			else {
 				conn_type = "SMALL";
-				sprintf(extra, " NodeCards=%d Quarters=%d",
-					allocated_block->request->nodecards,
-					allocated_block->request->quarters);
+				xstrfmtcat(extra, " NodeCards=%d Quarters=%d",
+					   allocated_block->request->nodecards,
+					   allocated_block->request->quarters);
 			}
-			sprintf(save_string, "BPs=%s Type=%s%s\n", 
-				allocated_block->request->save_name, 
-				conn_type, extra);
-			fputs (save_string,file_ptr);
+			xstrfmtcat(save_string, "BPs=%s Type=%s", 
+				   allocated_block->request->save_name, 
+				   conn_type);
+			if(extra) {
+				xstrfmtcat(save_string, "%s\n", extra);
+				xfree(extra);
+			} else 
+				xstrcat(save_string, "\n");
+			
 		}
+		list_iterator_destroy(results_i);
+		fputs(save_string, file_ptr);
+		xfree(save_string);
 		fclose (file_ptr);
 	}
+
 	return 1;
 }
 
@@ -1276,7 +1279,7 @@ static void _print_text_command(allocated_block_t *allocated_block)
 	main_xcord += 7;
 
 	mvwprintw(text_win, main_ycord,
-		  main_xcord, "%d",allocated_block->request->size);
+		  main_xcord, "%d", allocated_block->request->size);
 	main_xcord += 10;
 	
 	if(allocated_block->request->conn_type == SELECT_SMALL) {
diff --git a/src/sreport/cluster_reports.c b/src/sreport/cluster_reports.c
index c28956716..2797f630a 100644
--- a/src/sreport/cluster_reports.c
+++ b/src/sreport/cluster_reports.c
@@ -38,6 +38,7 @@
 \*****************************************************************************/
 
 #include "cluster_reports.h"
+bool tree_display = 0;
 
 enum {
 	PRINT_CLUSTER_NAME,
@@ -47,7 +48,11 @@ enum {
 	PRINT_CLUSTER_ICPU,
 	PRINT_CLUSTER_OCPU,
 	PRINT_CLUSTER_RCPU,
-	PRINT_CLUSTER_TOTAL
+	PRINT_CLUSTER_TOTAL,
+	PRINT_CLUSTER_ACCT,
+	PRINT_CLUSTER_USER_LOGIN,
+	PRINT_CLUSTER_USER_PROPER,
+	PRINT_CLUSTER_AMOUNT_USED,
 };
 
 typedef enum {
@@ -62,18 +67,26 @@ typedef enum {
 
 static List print_fields_list = NULL; /* types are of print_field_t */
 
-static int _set_cond(int *start, int argc, char *argv[],
-		     acct_cluster_cond_t *cluster_cond,
-		     List format_list)
+static int _set_assoc_cond(int *start, int argc, char *argv[],
+			   acct_association_cond_t *assoc_cond,
+			   List format_list)
 {
 	int i;
 	int set = 0;
 	int end = 0;
 	int local_cluster_flag = all_clusters_flag;
+	time_t start_time, end_time;
+	
+	if(!assoc_cond) {
+		error("We need an acct_association_cond to call this");
+		return SLURM_ERROR;
+	}
 
-	if(!cluster_cond->cluster_list)
-		cluster_cond->cluster_list = list_create(slurm_destroy_char);
+	assoc_cond->with_usage = 1;
+	assoc_cond->with_deleted = 1;
 
+	if(!assoc_cond->cluster_list)
+		assoc_cond->cluster_list = list_create(slurm_destroy_char);
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!strncasecmp (argv[i], "Set", 3)) {
@@ -83,20 +96,108 @@ static int _set_cond(int *start, int argc, char *argv[],
 			continue;
 		} else if(!end && !strncasecmp(argv[i], "all_clusters", 1)) {
 			local_cluster_flag = 1;
+		} else if (!end && !strncasecmp (argv[i], "Tree", 4)) {
+			tree_display = 1;
+		} else if(!end
+			  || !strncasecmp (argv[i], "Users", 1)) {
+			if(!assoc_cond->user_list)
+				assoc_cond->user_list = 
+					list_create(slurm_destroy_char);
+			slurm_addto_char_list(assoc_cond->user_list,
+					      argv[i]+end);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "Accounts", 2)) {
+			if(!assoc_cond->acct_list)
+				assoc_cond->acct_list =
+					list_create(slurm_destroy_char);
+			slurm_addto_char_list(assoc_cond->acct_list,
+					argv[i]+end);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "Clusters", 1)) {
+			slurm_addto_char_list(assoc_cond->cluster_list,
+					argv[i]+end);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "End", 1)) {
+			assoc_cond->usage_end = parse_time(argv[i]+end, 1);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "Format", 1)) {
+			if(format_list)
+				slurm_addto_char_list(format_list, 
+						      argv[i]+end);
+		} else if (!strncasecmp (argv[i], "Start", 1)) {
+			assoc_cond->usage_start = parse_time(argv[i]+end, 1);
+			set = 1;
+		} else {
+			exit_code=1;
+			fprintf(stderr, " Unknown condition: %s\n"
+			       "Use keyword set to modify value\n", argv[i]);
+		}
+	}
+	(*start) = i;
+
+	if(!local_cluster_flag && !list_count(assoc_cond->cluster_list)) {
+		char *temp = slurm_get_cluster_name();
+		if(temp)
+			list_append(assoc_cond->cluster_list, temp);
+	}
+
+	/* This needs to be done on some systems to make sure
+	   assoc_cond isn't messed up.  This has happened on some 64
+	   bit machines and this is here to be on the safe side.
+	*/
+	start_time = assoc_cond->usage_start;
+	end_time = assoc_cond->usage_end;
+	set_start_end_time(&start_time, &end_time);
+	assoc_cond->usage_start = start_time;
+	assoc_cond->usage_end = end_time;
+
+	return set;
+}
+
+static int _set_cluster_cond(int *start, int argc, char *argv[],
+			     acct_cluster_cond_t *cluster_cond,
+			     List format_list)
+{
+	int i;
+	int set = 0;
+	int end = 0;
+	int local_cluster_flag = all_clusters_flag;
+	time_t start_time, end_time;
+
+	if(!cluster_cond) {
+		error("We need an acct_cluster_cond to call this");
+		return SLURM_ERROR;
+	}
+
+	cluster_cond->with_deleted = 1;
+	cluster_cond->with_usage = 1;
+
+	if(!cluster_cond->cluster_list)
+		cluster_cond->cluster_list = list_create(slurm_destroy_char);
+	for (i=(*start); i<argc; i++) {
+		end = parse_option_end(argv[i]);
+		if (!strncasecmp (argv[i], "Set", 3)) {
+			i--;
+			break;
+		} else if(!end && !strncasecmp(argv[i], "where", 5)) {
 			continue;
+		} else if(!end && !strncasecmp(argv[i], "all_clusters", 1)) {
+			local_cluster_flag = 1;
 		} else if(!end
+			  || !strncasecmp (argv[i], "Clusters", 1)
 			  || !strncasecmp (argv[i], "Names", 1)) {
 			slurm_addto_char_list(cluster_cond->cluster_list,
-					      argv[i]);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", 1)) {
-			cluster_cond->usage_end = parse_time(argv[i]+end);
+			cluster_cond->usage_end = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format", 1)) {
 			if(format_list)
-				slurm_addto_char_list(format_list, argv[i]+end);
+				slurm_addto_char_list(format_list,
+						      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Start", 1)) {
-			cluster_cond->usage_start = parse_time(argv[i]+end);
+			cluster_cond->usage_start = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else {
 			exit_code=1;
@@ -112,8 +213,15 @@ static int _set_cond(int *start, int argc, char *argv[],
 			list_append(cluster_cond->cluster_list, temp);
 	}
 
-	set_start_end_time((time_t *)&cluster_cond->usage_start,
-			   (time_t *)&cluster_cond->usage_end);
+	/* This needs to be done on some systems to make sure
+	   cluster_cond isn't messed.  This has happened on some 64
+	   bit machines and this is here to be on the safe side.
+	*/
+	start_time = cluster_cond->usage_start;
+	end_time = cluster_cond->usage_end;
+	set_start_end_time(&start_time, &end_time);
+	cluster_cond->usage_start = start_time;
+	cluster_cond->usage_end = end_time;
 
 	return set;
 }
@@ -136,8 +244,27 @@ static int _setup_print_fields_list(List format_list)
 
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
-		if(!strncasecmp("Cluster", object, 2)) {
+		if(!strncasecmp("Accounts", object, 2)) {
+			field->type = PRINT_CLUSTER_ACCT;
+			field->name = xstrdup("Account");
+			if(tree_display)
+				field->len = 20;
+			else
+				field->len = 15;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("allocated", object, 2)) {
+			field->type = PRINT_CLUSTER_ACPU;
+			field->name = xstrdup("Allocated");
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
+				field->len = 20;
+			else
+				field->len = 12;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("Cluster", object, 2)) {
 			field->type = PRINT_CLUSTER_NAME;
 			field->name = xstrdup("Cluster");
 			field->len = 9;
@@ -147,18 +274,12 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("CPU count");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("allocated", object, 1)) {
-			field->type = PRINT_CLUSTER_ACPU;
-			field->name = xstrdup("Allocated");
-			if(time_format == SREPORT_TIME_SECS_PER)
-				field->len = 20;
-			else
-				field->len = 12;
-			field->print_routine = sreport_print_time;
 		} else if(!strncasecmp("down", object, 1)) {
 			field->type = PRINT_CLUSTER_DCPU;
 			field->name = xstrdup("Down");
-			if(time_format == SREPORT_TIME_SECS_PER)
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
 				field->len = 18;
 			else
 				field->len = 10;
@@ -166,23 +287,39 @@ static int _setup_print_fields_list(List format_list)
 		} else if(!strncasecmp("idle", object, 1)) {
 			field->type = PRINT_CLUSTER_ICPU;
 			field->name = xstrdup("Idle");
-			if(time_format == SREPORT_TIME_SECS_PER)
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
 				field->len = 20;
 			else
 				field->len = 12;
 			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("Login", object, 1)) {
+			field->type = PRINT_CLUSTER_USER_LOGIN;
+			field->name = xstrdup("Login");
+			field->len = 9;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("overcommited", object, 1)) {
 			field->type = PRINT_CLUSTER_OCPU;
 			field->name = xstrdup("Over Comm");
-			if(time_format == SREPORT_TIME_SECS_PER)
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
 				field->len = 18;
 			else
 				field->len = 9;
 			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("Proper", object, 1)) {
+			field->type = PRINT_CLUSTER_USER_PROPER;
+			field->name = xstrdup("Proper Name");
+			field->len = 15;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("reported", object, 3)) {
 			field->type = PRINT_CLUSTER_TOTAL;
 			field->name = xstrdup("Reported");
-			if(time_format == SREPORT_TIME_SECS_PER)
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
 				field->len = 20;
 			else
 				field->len = 12;
@@ -190,17 +327,34 @@ static int _setup_print_fields_list(List format_list)
 		} else if(!strncasecmp("reserved", object, 3)) {
 			field->type = PRINT_CLUSTER_RCPU;
 			field->name = xstrdup("Reserved");
-			if(time_format == SREPORT_TIME_SECS_PER)
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
 				field->len = 18;
 			else
 				field->len = 9;
 			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("Used", object, 1)) {
+			field->type = PRINT_CLUSTER_AMOUNT_USED;
+			field->name = xstrdup("Used");
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
+				field->len = 18;
+			else
+				field->len = 10;
+			field->print_routine = sreport_print_time;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown field '%s'\n", object);
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
@@ -216,9 +370,10 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 	int i=0;
 	List cluster_list = NULL;
 
+	cluster_cond->with_deleted = 1;
 	cluster_cond->with_usage = 1;
 
-	_set_cond(&i, argc, argv, cluster_cond, format_list);
+	_set_cluster_cond(&i, argc, argv, cluster_cond, format_list);
 	
 	cluster_list = acct_storage_g_get_clusters(db_conn, my_uid,
 						   cluster_cond);
@@ -231,9 +386,10 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
+		time_t my_start = cluster_cond->usage_start;
 		time_t my_end = cluster_cond->usage_end-1;
 
-		slurm_make_time_str((time_t *)&cluster_cond->usage_start, 
+		slurm_make_time_str(&my_start, 
 				    start_char, sizeof(start_char));
 		slurm_make_time_str(&my_end,
 				    end_char, sizeof(end_char));
@@ -242,6 +398,14 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 		printf("%s %s - %s (%d*cpus secs)\n", 
 		       report_name, start_char, end_char, 
 		       (cluster_cond->usage_end - cluster_cond->usage_start));
+		switch(time_format) {
+		case SREPORT_TIME_PERCENT:
+			printf("Time reported in %s\n", time_format_string);
+			break; 
+		default:
+			printf("Time reported in CPU %s\n", time_format_string);
+			break;
+		}
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 	}
@@ -277,7 +441,7 @@ extern int cluster_utilization(int argc, char *argv[])
 		goto end_it;
 
 	if(!list_count(format_list)) 
-		slurm_addto_char_list(format_list, "Cl,a,d,i,res,rep");
+		slurm_addto_char_list(format_list, "Cl,al,d,i,res,rep");
 
 	_setup_print_fields_list(format_list);
 	list_destroy(format_list);
@@ -314,17 +478,19 @@ extern int cluster_utilization(int argc, char *argv[])
 		list_iterator_destroy(itr3);
 
 		total_acct.cpu_count /= list_count(cluster->accounting_list);
-		local_total_time = total_time * total_acct.cpu_count;
+		
+		local_total_time =
+			(uint64_t)total_time * (uint64_t)total_acct.cpu_count;
 		total_reported = total_acct.alloc_secs + total_acct.down_secs 
 			+ total_acct.idle_secs + total_acct.resv_secs;
-		
+
 		while((field = list_next(itr2))) {
 			switch(field->type) {
 			case PRINT_CLUSTER_NAME:
 				field->print_routine(field,
 						     cluster->name,
 						     (curr_inx == 
-						      field_count));		
+						      field_count));
 				break;
 			case PRINT_CLUSTER_CPUS:
 				field->print_routine(field,
@@ -375,6 +541,9 @@ extern int cluster_utilization(int argc, char *argv[])
 						      field_count));
 				break;
 			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
 				break;
 			}
 			curr_inx++;
@@ -400,3 +569,585 @@ end_it:
 	return rc;
 }
 
+extern int cluster_user_by_account(int argc, char *argv[])
+{
+	int rc = SLURM_SUCCESS;
+	acct_association_cond_t *assoc_cond =
+		xmalloc(sizeof(acct_association_cond_t));
+	acct_cluster_cond_t cluster_cond;
+	ListIterator itr = NULL;
+	ListIterator itr2 = NULL;
+	ListIterator assoc_itr = NULL;
+	ListIterator cluster_itr = NULL;
+	List format_list = list_create(slurm_destroy_char);
+	List assoc_list = NULL;
+	List cluster_list = NULL;
+	List sreport_cluster_list = list_create(destroy_sreport_cluster_rec);
+	int i=0;
+	acct_cluster_rec_t *cluster = NULL;
+	acct_association_rec_t *assoc = NULL;
+	sreport_user_rec_t *sreport_user = NULL;
+	sreport_cluster_rec_t *sreport_cluster = NULL;
+	print_field_t *field = NULL;
+	int field_count = 0;
+
+	print_fields_list = list_create(destroy_print_field);
+
+	bzero(&cluster_cond, sizeof(acct_cluster_cond_t));
+
+	_set_assoc_cond(&i, argc, argv, assoc_cond, format_list);
+
+	if(!list_count(format_list)) 
+		slurm_addto_char_list(format_list, "Cluster,L,P,Ac,Used");
+
+	_setup_print_fields_list(format_list);
+	list_destroy(format_list);
+
+	cluster_cond.with_deleted = 1;
+	cluster_cond.with_usage = 1;
+	cluster_cond.usage_end = assoc_cond->usage_end;
+	cluster_cond.usage_start = assoc_cond->usage_start;
+	cluster_cond.cluster_list = assoc_cond->cluster_list;
+	cluster_list = acct_storage_g_get_clusters(
+		db_conn, my_uid, &cluster_cond);
+
+	if(!cluster_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem with cluster query.\n");
+		goto end_it;
+	}
+	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
+						     assoc_cond);
+	if(!assoc_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem with assoc query.\n");
+		goto end_it;
+	}
+
+	/* set up the structures for easy retrieval later */
+	itr = list_iterator_create(cluster_list);
+	assoc_itr = list_iterator_create(assoc_list);
+	while((cluster = list_next(itr))) {
+		cluster_accounting_rec_t *accting = NULL;
+
+		/* check to see if this cluster is around during the
+		   time we are looking at */
+		if(!cluster->accounting_list
+		   || !list_count(cluster->accounting_list))
+			continue;
+
+		sreport_cluster = xmalloc(sizeof(sreport_cluster_rec_t));
+
+		list_append(sreport_cluster_list, sreport_cluster);
+
+		sreport_cluster->name = xstrdup(cluster->name);
+		sreport_cluster->user_list = 
+			list_create(destroy_sreport_user_rec);
+
+		/* get the amount of time and the average cpu count
+		   during the time we are looking at */
+		itr2 = list_iterator_create(cluster->accounting_list);
+		while((accting = list_next(itr2))) {
+			sreport_cluster->cpu_secs += accting->alloc_secs 
+				+ accting->down_secs + accting->idle_secs 
+				+ accting->resv_secs;
+			sreport_cluster->cpu_count += accting->cpu_count;
+		}
+		list_iterator_destroy(itr2);
+
+		sreport_cluster->cpu_count /= 
+			list_count(cluster->accounting_list);
+		
+		/* now add the associations of interest here by user */
+		while((assoc = list_next(assoc_itr))) {
+			struct passwd *passwd_ptr = NULL;
+			uid_t uid = NO_VAL;
+			ListIterator user_itr = NULL;
+			acct_accounting_rec_t *accting2 = NULL;
+
+			if(!assoc->accounting_list
+			   || !list_count(assoc->accounting_list)
+			   || !assoc->user) {
+				list_delete_item(assoc_itr);
+				continue;
+			}
+
+			if(strcmp(cluster->name, assoc->cluster)) 
+				continue;
+
+			/* make sure we add all associations to this
+			   user rec because we could have some in
+			   partitions which would create another
+			   record otherwise
+			*/
+			user_itr = list_iterator_create(
+				sreport_cluster->user_list); 
+			while((sreport_user = list_next(user_itr))) {
+				if(!strcmp(sreport_user->name, assoc->user)
+				   && !strcmp(sreport_user->acct, assoc->acct))
+					break;				
+			}
+			list_iterator_destroy(user_itr);
+
+			if(!sreport_user) {
+				passwd_ptr = getpwnam(assoc->user);
+				if(passwd_ptr) 
+					uid = passwd_ptr->pw_uid;
+				/* In this report we are using the sreport user
+				   structure to store the information we want
+				   since it is already available and will do
+				   pretty much what we want.
+				*/
+				sreport_user =
+					xmalloc(sizeof(sreport_user_rec_t));
+				sreport_user->name = xstrdup(assoc->user);
+				sreport_user->uid = uid;
+				sreport_user->acct = xstrdup(assoc->acct);
+	
+				list_append(sreport_cluster->user_list,
+					    sreport_user);
+			} 
+
+			/* get the amount of time this assoc used
+			   during the time we are looking at */
+			itr2 = list_iterator_create(assoc->accounting_list);
+			while((accting2 = list_next(itr2))) {
+				sreport_user->cpu_secs += 
+					(uint64_t)accting2->alloc_secs;
+			}
+			list_iterator_destroy(itr2);
+			list_delete_item(assoc_itr);
+		}
+		list_iterator_reset(assoc_itr);
+	}
+	list_iterator_destroy(assoc_itr);
+	list_iterator_destroy(itr);
+
+	list_destroy(cluster_list);
+	cluster_list = NULL;
+	list_destroy(assoc_list);
+	assoc_list = NULL;
+
+	if(print_fields_have_header) {
+		char start_char[20];
+		char end_char[20];
+		time_t my_start = assoc_cond->usage_start;
+		time_t my_end = assoc_cond->usage_end-1;
+		
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+		printf("Cluster/User/Account Utilization %s - %s (%d secs)\n", 
+		       start_char, end_char, 
+		       (assoc_cond->usage_end - assoc_cond->usage_start));
+		
+		switch(time_format) {
+		case SREPORT_TIME_PERCENT:
+			printf("Time reported in %s\n", time_format_string);
+			break; 
+		default:
+			printf("Time reported in CPU %s\n", 
+			       time_format_string);
+			break;
+		}
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+	}
+
+	itr2 = list_iterator_create(print_fields_list);
+	print_fields_header(print_fields_list);
+
+	field_count = list_count(print_fields_list);
+	cluster_itr = list_iterator_create(sreport_cluster_list);
+	while((sreport_cluster = list_next(cluster_itr))) {
+		list_sort(sreport_cluster->user_list, (ListCmpF)sort_user_dec);
+	
+		itr = list_iterator_create(sreport_cluster->user_list);
+		while((sreport_user = list_next(itr))) {
+			int curr_inx = 1;
+
+			/* we don't care if they didn't use any time */
+			if(!sreport_user->cpu_secs)
+				continue;
+
+			while((field = list_next(itr2))) {
+				char *tmp_char = NULL;
+				struct passwd *pwd = NULL;
+				switch(field->type) {
+				case PRINT_CLUSTER_ACCT:
+					field->print_routine(
+						field,
+						sreport_user->acct,
+						(curr_inx == field_count));
+					break;
+				case PRINT_CLUSTER_NAME:
+					field->print_routine(
+						field,
+						sreport_cluster->name,
+						(curr_inx == field_count));
+					break;
+				case PRINT_CLUSTER_USER_LOGIN:
+					field->print_routine(
+						field,
+						sreport_user->name,
+						(curr_inx == field_count));
+					break;
+				case PRINT_CLUSTER_USER_PROPER:
+					pwd = getpwnam(sreport_user->name);
+					if(pwd) {
+						tmp_char = 
+							strtok(pwd->pw_gecos,
+							       ",");
+						if(!tmp_char)
+							tmp_char = 
+								pwd->pw_gecos;
+					}
+					field->print_routine(field,
+							     tmp_char,
+							     (curr_inx == 
+							      field_count));
+					break;
+				case PRINT_CLUSTER_AMOUNT_USED:
+					field->print_routine(
+						field,
+						sreport_user->cpu_secs,
+						sreport_cluster->cpu_secs,
+						(curr_inx == field_count));
+					break;
+				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
+					break;
+				}
+				curr_inx++;
+			}
+			list_iterator_reset(itr2);
+			printf("\n");
+		}
+		list_iterator_destroy(itr);
+	}
+	list_iterator_destroy(cluster_itr);
+end_it:
+	destroy_acct_association_cond(assoc_cond);
+	
+	if(assoc_list) {
+		list_destroy(assoc_list);
+		assoc_list = NULL;
+	}
+	
+	if(cluster_list) {
+		list_destroy(cluster_list);
+		cluster_list = NULL;
+	}
+	
+	if(sreport_cluster_list) {
+		list_destroy(sreport_cluster_list);
+		sreport_cluster_list = NULL;
+	}
+	
+	if(print_fields_list) {
+		list_destroy(print_fields_list);
+		print_fields_list = NULL;
+	}
+
+	return rc;
+}
+
+extern int cluster_account_by_user(int argc, char *argv[])
+{
+	int rc = SLURM_SUCCESS;
+	acct_association_cond_t *assoc_cond = 
+		xmalloc(sizeof(acct_association_cond_t));
+	acct_cluster_cond_t cluster_cond;
+	ListIterator itr = NULL;
+	ListIterator itr2 = NULL;
+	ListIterator assoc_itr = NULL;
+	ListIterator cluster_itr = NULL;
+	List format_list = list_create(slurm_destroy_char);
+	List assoc_list = NULL;
+	List cluster_list = NULL;
+	List sreport_cluster_list = list_create(destroy_sreport_cluster_rec);
+	List tree_list = NULL;
+	int i=0;
+	acct_cluster_rec_t *cluster = NULL;
+	acct_association_rec_t *assoc = NULL;
+	sreport_assoc_rec_t *sreport_assoc = NULL;
+	sreport_cluster_rec_t *sreport_cluster = NULL;
+	print_field_t *field = NULL;
+	int field_count = 0;
+	char *print_acct = NULL;
+
+	print_fields_list = list_create(destroy_print_field);
+
+	bzero(&cluster_cond, sizeof(acct_cluster_cond_t));
+
+	assoc_cond->with_sub_accts = 1;
+
+	_set_assoc_cond(&i, argc, argv, assoc_cond, format_list);
+
+	if(!list_count(format_list)) 
+		slurm_addto_char_list(format_list, "Cluster,Ac,L,P,Used");
+
+	_setup_print_fields_list(format_list);
+	list_destroy(format_list);
+
+	cluster_cond.with_deleted = 1;
+	cluster_cond.with_usage = 1;
+	cluster_cond.usage_end = assoc_cond->usage_end;
+	cluster_cond.usage_start = assoc_cond->usage_start;
+	cluster_cond.cluster_list = assoc_cond->cluster_list;
+	cluster_list = acct_storage_g_get_clusters(
+		db_conn, my_uid, &cluster_cond);
+
+	if(!cluster_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem with cluster query.\n");
+		goto end_it;
+	}
+	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
+						     assoc_cond);
+	if(!assoc_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem with assoc query.\n");
+		goto end_it;
+	}
+
+	/* set up the structures for easy retrieval later */
+	itr = list_iterator_create(cluster_list);
+	assoc_itr = list_iterator_create(assoc_list);
+	while((cluster = list_next(itr))) {
+		cluster_accounting_rec_t *accting = NULL;
+
+		/* check to see if this cluster is around during the
+		   time we are looking at */
+		if(!cluster->accounting_list
+		   || !list_count(cluster->accounting_list))
+			continue;
+
+		sreport_cluster = xmalloc(sizeof(sreport_cluster_rec_t));
+
+		list_append(sreport_cluster_list, sreport_cluster);
+
+		sreport_cluster->name = xstrdup(cluster->name);
+		sreport_cluster->assoc_list = 
+			list_create(destroy_sreport_assoc_rec);
+
+		/* get the amount of time and the average cpu count
+		   during the time we are looking at */
+		itr2 = list_iterator_create(cluster->accounting_list);
+		while((accting = list_next(itr2))) {
+			sreport_cluster->cpu_secs += accting->alloc_secs 
+				+ accting->down_secs + accting->idle_secs 
+				+ accting->resv_secs;
+			sreport_cluster->cpu_count += accting->cpu_count;
+		}
+		list_iterator_destroy(itr2);
+
+		sreport_cluster->cpu_count /= 
+			list_count(cluster->accounting_list);
+		
+		/* now add the associations of interest here by user */
+		while((assoc = list_next(assoc_itr))) {
+			acct_accounting_rec_t *accting2 = NULL;
+
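+			/* an association with no accounting data is of no use for any cluster, so drop it */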
+			if(!assoc->accounting_list
+			   || !list_count(assoc->accounting_list)) {
+				list_delete_item(assoc_itr);
+				continue;
+			}
+
+			if(strcmp(cluster->name, assoc->cluster)) 
+				continue;
+
+			sreport_assoc = xmalloc(sizeof(sreport_assoc_rec_t));
+			
+			list_append(sreport_cluster->assoc_list, 
+				    sreport_assoc);
+
+			sreport_assoc->acct = xstrdup(assoc->acct);
+			sreport_assoc->parent_acct =
+				xstrdup(assoc->parent_acct);
+			sreport_assoc->user = xstrdup(assoc->user);
+				
+			/* get the amount of time this assoc used
+			   during the time we are looking at */
+			itr2 = list_iterator_create(assoc->accounting_list);
+			while((accting2 = list_next(itr2))) {
+				sreport_assoc->cpu_secs += 
+					(uint64_t)accting2->alloc_secs;
+			}
+			list_iterator_destroy(itr2);
+			list_delete_item(assoc_itr);		
+		}
+		list_iterator_reset(assoc_itr);
+	}
+	list_iterator_destroy(assoc_itr);
+	list_iterator_destroy(itr);
+
+	list_destroy(cluster_list);
+	cluster_list = NULL;
+	list_destroy(assoc_list);
+	assoc_list = NULL;
+
+	if(print_fields_have_header) {
+		char start_char[20];
+		char end_char[20];
+		time_t my_start = assoc_cond->usage_start;
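+		/* usage_end is exclusive, so display the last second actually covered */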
+		time_t my_end = assoc_cond->usage_end-1;
+		
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+		printf("Cluster/Account/User Utilization %s - %s (%d secs)\n", 
+		       start_char, end_char, 
+		       (assoc_cond->usage_end - assoc_cond->usage_start));
+		
+		switch(time_format) {
+		case SREPORT_TIME_PERCENT:
+			printf("Time reported in %s\n", time_format_string);
+			break; 
+		default:
+			printf("Time reported in CPU %s\n", 
+			       time_format_string);
+			break;
+		}
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+	}
+
+	itr2 = list_iterator_create(print_fields_list);
+	print_fields_header(print_fields_list);
+
+	field_count = list_count(print_fields_list);
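+	/* clusters are reported in alphabetical order (see sort_cluster_dec in common.c) */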
+	list_sort(sreport_cluster_list, (ListCmpF)sort_cluster_dec);
+	
+	cluster_itr = list_iterator_create(sreport_cluster_list);
+	while((sreport_cluster = list_next(cluster_itr))) {
+		//list_sort(sreport_cluster->assoc_list, 
+		//  (ListCmpF)sort_assoc_dec);
+		if(tree_list) 
+			list_flush(tree_list);
+		else 
+			tree_list = list_create(destroy_acct_print_tree);
+		
+		itr = list_iterator_create(sreport_cluster->assoc_list);
+		while((sreport_assoc = list_next(itr))) {
+			int curr_inx = 1;
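+			/* nothing to report for an association with zero usage */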
+			if(!sreport_assoc->cpu_secs)
+				continue;
+			while((field = list_next(itr2))) {
+				char *tmp_char = NULL;
+				struct passwd *pwd = NULL;
+				switch(field->type) {
+				case PRINT_CLUSTER_ACCT:
+					if(tree_display) {
+						char *local_acct = NULL;
+						char *parent_acct = NULL;
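+						/* for a user row the account itself is the parent, so the user nests beneath it */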
+						if(sreport_assoc->user) {
+							local_acct =
+								xstrdup_printf(
+									"|%s", 
+									sreport_assoc->acct);
+							parent_acct =
+								sreport_assoc->acct;
+						} else {
+							local_acct = xstrdup(
+								sreport_assoc->acct);
+							parent_acct = sreport_assoc->
+								parent_acct;
+						}
+						print_acct = get_tree_acct_name(
+							local_acct,
+							parent_acct,
+							sreport_cluster->name,
+							tree_list);
+						xfree(local_acct);
+					} else {
+						print_acct =
+							sreport_assoc->acct;
+					}
+					field->print_routine(
+						field, 
+						print_acct,
+						(curr_inx == field_count));
+					
+					break;
+				case PRINT_CLUSTER_NAME:
+					field->print_routine(
+						field,
+						sreport_cluster->name,
+						(curr_inx == field_count));
+					break;
+				case PRINT_CLUSTER_USER_LOGIN:
+					field->print_routine(
+						field,
+						sreport_assoc->user,
+						(curr_inx == field_count));
+					break;
+				case PRINT_CLUSTER_USER_PROPER:
+					if(sreport_assoc->user)
+						pwd = getpwnam(
+							sreport_assoc->user);
+					if(pwd) {
+						tmp_char = 
+							strtok(pwd->pw_gecos,
+							       ",");
+						if(!tmp_char)
+							tmp_char = 
+								pwd->pw_gecos;
+					}
+					field->print_routine(field,
+							     tmp_char,
+							     (curr_inx == 
+							      field_count));
+					break;
+				case PRINT_CLUSTER_AMOUNT_USED:
+					field->print_routine(
+						field,
+						sreport_assoc->cpu_secs,
+						sreport_cluster->cpu_secs,
+						(curr_inx == field_count));
+					break;
+				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
+					break;
+				}
+				curr_inx++;
+			}
+			list_iterator_reset(itr2);
+			printf("\n");
+		}
+		list_iterator_destroy(itr);
+	}
+	list_iterator_destroy(cluster_itr);
+end_it:
+	destroy_acct_association_cond(assoc_cond);
+	
+	if(assoc_list) {
+		list_destroy(assoc_list);
+		assoc_list = NULL;
+	}
+	
+	if(cluster_list) {
+		list_destroy(cluster_list);
+		cluster_list = NULL;
+	}
+	
+	if(sreport_cluster_list) {
+		list_destroy(sreport_cluster_list);
+		sreport_cluster_list = NULL;
+	}
+	
+	if(print_fields_list) {
+		list_destroy(print_fields_list);
+		print_fields_list = NULL;
+	}
+
+	return rc;
+}
+
diff --git a/src/sreport/cluster_reports.h b/src/sreport/cluster_reports.h
index 0901fb82b..7208ae771 100644
--- a/src/sreport/cluster_reports.h
+++ b/src/sreport/cluster_reports.h
@@ -43,5 +43,7 @@
 #include "sreport.h"
 
 extern int cluster_utilization(int argc, char *argv[]);
+extern int cluster_user_by_account(int argc, char *argv[]);
+extern int cluster_account_by_user(int argc, char *argv[]);
 
 #endif
diff --git a/src/sreport/common.c b/src/sreport/common.c
index 256306c00..254f99d40 100644
--- a/src/sreport/common.c
+++ b/src/sreport/common.c
@@ -42,7 +42,7 @@
 extern void sreport_print_time(print_field_t *field,
 			       uint64_t value, uint64_t total_time, int last)
 {
-	if(!total_time)
+	if(!total_time) 
 		total_time = 1;
 
 	/* (value == unset)  || (value == cleared) */
@@ -58,11 +58,20 @@ extern void sreport_print_time(print_field_t *field,
 	} else {
 		char *output = NULL;
 		double percent = (double)value;
+		double temp_d = (double)value;
 		
 		switch(time_format) {
 		case SREPORT_TIME_SECS:
 			output = xstrdup_printf("%llu", value);
 			break;
+		case SREPORT_TIME_MINS:
+			temp_d /= 60;
+			output = xstrdup_printf("%.0lf", temp_d);
+			break;
+		case SREPORT_TIME_HOURS:
+			temp_d /= 3600;
+			output = xstrdup_printf("%.0lf", temp_d);
+			break;
 		case SREPORT_TIME_PERCENT:
 			percent /= total_time;
 			percent *= 100;
@@ -74,8 +83,23 @@ extern void sreport_print_time(print_field_t *field,
 			output = xstrdup_printf("%llu(%.2lf%%)",
 						value, percent);
 			break;
+		case SREPORT_TIME_MINS_PER:
+			percent /= total_time;
+			percent *= 100;
+			temp_d /= 60;
+			output = xstrdup_printf("%.0lf(%.2lf%%)",
+						temp_d, percent);
+			break;
+		case SREPORT_TIME_HOURS_PER:
+			percent /= total_time;
+			percent *= 100;
+			temp_d /= 3600;
+			output = xstrdup_printf("%.0lf(%.2lf%%)",
+						temp_d, percent);
+			break;
 		default:
-			output = xstrdup_printf("%llu", value);
+			temp_d /= 60;
+			output = xstrdup_printf("%.0lf", temp_d);
 			break;
 		}
 		
@@ -86,7 +110,7 @@ extern void sreport_print_time(print_field_t *field,
 		else if(print_fields_parsable_print)
 			printf("%s|", output);	
 		else
-			printf("%*s ", field->len, output);
+			printf("%*.*s ", field->len, field->len, output);
 		xfree(output);
 	}
 }
@@ -199,20 +223,24 @@ extern void addto_char_list(List char_list, char *names)
 extern int set_start_end_time(time_t *start, time_t *end)
 {
 	time_t my_time = time(NULL);
+	time_t temp_time;
 	struct tm start_tm;
 	struct tm end_tm;
+	int sent_start = (*start), sent_end = (*end);
 
+//	info("now got %d and %d sent", (*start), (*end));
 	/* Default is going to be the last day */
-	if(!(*end)) {
+	if(!sent_end) {
 		if(!localtime_r(&my_time, &end_tm)) {
 			error("Couldn't get localtime from end %d",
 			      my_time);
 			return SLURM_ERROR;
 		}
 		end_tm.tm_hour = 0;
-		(*end) = mktime(&end_tm);		
+		//(*end) = mktime(&end_tm);		
 	} else {
-		if(!localtime_r(end, &end_tm)) {
+		temp_time = sent_end;
+		if(!localtime_r(&temp_time, &end_tm)) {
 			error("Couldn't get localtime from user end %d",
 			      my_time);
 			return SLURM_ERROR;
@@ -223,7 +251,7 @@ extern int set_start_end_time(time_t *start, time_t *end)
 	end_tm.tm_isdst = -1;
 	(*end) = mktime(&end_tm);		
 
-	if(!(*start)) {
+	if(!sent_start) {
 		if(!localtime_r(&my_time, &start_tm)) {
 			error("Couldn't get localtime from start %d",
 			      my_time);
@@ -231,9 +259,10 @@ extern int set_start_end_time(time_t *start, time_t *end)
 		}
 		start_tm.tm_hour = 0;
 		start_tm.tm_mday--;
-		(*start) = mktime(&start_tm);		
+		//(*start) = mktime(&start_tm);		
 	} else {
-		if(!localtime_r(start, &start_tm)) {
+		temp_time = sent_start;
+		if(!localtime_r(&temp_time, &start_tm)) {
 			error("Couldn't get localtime from user start %d",
 			      my_time);
 			return SLURM_ERROR;
@@ -246,6 +275,143 @@ extern int set_start_end_time(time_t *start, time_t *end)
 
 	if((*end)-(*start) < 3600) 
 		(*end) = (*start) + 3600;
+//	info("now got %d and %d sent", (*start), (*end));
 
 	return SLURM_SUCCESS;
 }
+
+extern void destroy_sreport_assoc_rec(void *object)
+{
+	sreport_assoc_rec_t *sreport_assoc = (sreport_assoc_rec_t *)object;
+	if(sreport_assoc) {
+		xfree(sreport_assoc->acct);
+		xfree(sreport_assoc->cluster);
+		xfree(sreport_assoc->parent_acct);
+		xfree(sreport_assoc->user);
+		xfree(sreport_assoc);
+	}
+}
+
+extern void destroy_sreport_user_rec(void *object)
+{
+	sreport_user_rec_t *sreport_user = (sreport_user_rec_t *)object;
+	if(sreport_user) {
+		xfree(sreport_user->acct);
+		if(sreport_user->acct_list)
+			list_destroy(sreport_user->acct_list);
+		xfree(sreport_user->name);
+		xfree(sreport_user);
+	}
+}
+
+extern void destroy_sreport_cluster_rec(void *object)
+{
+	sreport_cluster_rec_t *sreport_cluster = 
+		(sreport_cluster_rec_t *)object;
+	if(sreport_cluster) {
+		if(sreport_cluster->assoc_list)
+			list_destroy(sreport_cluster->assoc_list);
+		xfree(sreport_cluster->name);
+		if(sreport_cluster->user_list)
+			list_destroy(sreport_cluster->user_list);
+		xfree(sreport_cluster);
+	}
+}
+
+/* 
+ * Comparator used for sorting users from largest cpu usage to smallest,
+ * with ties (or the Name sort option) ordered alphabetically by name
+ * returns: -1: user_a sorts first   0: equal   1: user_b sorts first
+ * 
+ */
+extern int sort_user_dec(sreport_user_rec_t *user_a, sreport_user_rec_t *user_b)
+{
+	int diff = 0;
+
+	if(sort_flag == SREPORT_SORT_TIME) {
+		if (user_a->cpu_secs > user_b->cpu_secs)
+			return -1;
+		else if (user_a->cpu_secs < user_b->cpu_secs)
+			return 1;
+	}
+
+	if(!user_a->name || !user_b->name)
+		return 0;
+
+	diff = strcmp(user_a->name, user_b->name);
+
+	if (diff > 0)
+		return 1;
+	else if (diff < 0)
+		return -1;
+	
+	return 0;
+}
+
+/* 
+ * Comparator used for sorting clusters alphabetically
+ * 
+ * returns: 1: cluster_a > cluster_b   
+ *           0: cluster_a == cluster_b
+ *           -1: cluster_a < cluster_b
+ * 
+ */
+extern int sort_cluster_dec(sreport_cluster_rec_t *cluster_a,
+			    sreport_cluster_rec_t *cluster_b)
+{
+	int diff = 0;
+
+	if(!cluster_a->name || !cluster_b->name)
+		return 0;
+
+	diff = strcmp(cluster_a->name, cluster_b->name);
+
+	if (diff > 0)
+		return 1;
+	else if (diff < 0)
+		return -1;
+	
+	return 0;
+}
+
+/* 
+ * Comparator used for sorting assocs alphabetically by acct and then
+ * by user.  The entry carrying an account's total time (the one with
+ * no user set) is kept separate from that account's per-user entries.
+ * 
+ * returns: -1: assoc_a sorts before assoc_b
+ *           0: assoc_a == assoc_b
+ *           1: assoc_a sorts after assoc_b
+ * 
+ */
+extern int sort_assoc_dec(sreport_assoc_rec_t *assoc_a,
+			  sreport_assoc_rec_t *assoc_b)
+{
+	int diff = 0;
+
+	if(!assoc_a->acct || !assoc_b->acct)
+		return 0;
+
+	diff = strcmp(assoc_a->acct, assoc_b->acct);
+
+	if (diff > 0)
+		return 1;
+	else if (diff < 0)
+		return -1;
+	
+	if(!assoc_a->user && assoc_b->user)
+		return 1;
+	else if(!assoc_b->user)
+		return -1;
+
+	diff = strcmp(assoc_a->user, assoc_b->user);
+
+	if (diff > 0)
+		return 1;
+	else if (diff < 0)
+		return -1;
+	
+
+	return 0;
+}
+
diff --git a/src/sreport/job_reports.c b/src/sreport/job_reports.c
index 56f31907a..8c6e56ed2 100644
--- a/src/sreport/job_reports.c
+++ b/src/sreport/job_reports.c
@@ -80,6 +80,7 @@ enum {
 
 static List print_fields_list = NULL; /* types are of print_field_t */
 static List grouping_print_fields_list = NULL; /* types are of print_field_t */
+static int print_job_count = 0;
 
 static void _destroy_local_grouping(void *object)
 {
@@ -223,6 +224,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int local_cluster_flag = all_clusters_flag;
+	time_t start_time, end_time;
 
 	if(!job_cond->cluster_list)
 		job_cond->cluster_list = list_create(slurm_destroy_char);
@@ -237,29 +239,30 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else if(!end && !strncasecmp(argv[i], "all_clusters", 1)) {
 			local_cluster_flag = 1;
 			continue;
-		} else if(!end || !strncasecmp (argv[i], "Clusters", 1)) {
-			slurm_addto_char_list(job_cond->cluster_list, argv[i]);
+		} else if(!end && !strncasecmp(argv[i], "PrintJobCount", 2)) {
+			print_job_count = 1;
+			continue;
+		} else if(!end 
+			  || !strncasecmp (argv[i], "Clusters", 1)) {
+			slurm_addto_char_list(job_cond->cluster_list,
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Accounts", 2)) {
 			if(!job_cond->acct_list)
 				job_cond->acct_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(job_cond->acct_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Associations", 2)) {
 			if(!job_cond->associd_list)
 				job_cond->associd_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(job_cond->associd_list,
-					argv[i]+end);
-			set = 1;
-		} else if (!strncasecmp (argv[i], "Clusters", 1)) {
-			slurm_addto_char_list(job_cond->cluster_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", 1)) {
-			job_cond->usage_end = parse_time(argv[i]+end);
+			job_cond->usage_end = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format", 1)) {
 			if(format_list)
@@ -269,11 +272,12 @@ static int _set_cond(int *start, int argc, char *argv[],
 				job_cond->groupid_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(job_cond->groupid_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "grouping", 2)) {
 			if(grouping_list)
-				slurm_addto_char_list(grouping_list, argv[i]+end);
+				slurm_addto_char_list(grouping_list, 
+						      argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Jobs", 1)) {
 			char *end_char = NULL, *start_char = argv[i]+end;
 			jobacct_selected_step_t *selected_step = NULL;
@@ -286,7 +290,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			       && start_char) {
 				*end_char = 0;
 				while (isspace(*start_char))
-					start_char++;	/* discard whitespace */
+					start_char++;  /* discard whitespace */
 				if(!(int)*start_char)
 					continue;
 				selected_step = xmalloc(
@@ -306,15 +310,15 @@ static int _set_cond(int *start, int argc, char *argv[],
 			}
 			
 			set = 1;
-		} else if (!strncasecmp (argv[i], "Partitions", 1)) {
+		} else if (!strncasecmp (argv[i], "Partitions", 2)) {
 			if(!job_cond->partition_list)
 				job_cond->partition_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(job_cond->partition_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Start", 1)) {
-			job_cond->usage_start = parse_time(argv[i]+end);
+			job_cond->usage_start = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Users", 1)) {
 			if(!job_cond->userid_list)
@@ -326,7 +330,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
-			       "Use keyword set to modify value\n", argv[i]);
+				"Use keyword set to modify value\n", argv[i]);
 		}
 	}
 	(*start) = i;
@@ -337,8 +341,15 @@ static int _set_cond(int *start, int argc, char *argv[],
 			list_append(job_cond->cluster_list, temp);
 	}
 
-	set_start_end_time((time_t *)&job_cond->usage_start,
-			   (time_t *)&job_cond->usage_end);
+	/* Copy through local time_t variables on some systems to make
+	   sure job_cond isn't messed up.  This has happened on some 64
+	   bit machines and this is here to be on the safe side.
+	*/
+	start_time = job_cond->usage_start;
+	end_time = job_cond->usage_end;
+	set_start_end_time(&start_time, &end_time);
+	job_cond->usage_start = start_time;
+	job_cond->usage_end = end_time;
 
 	return set;
 }
@@ -362,6 +373,7 @@ static int _setup_print_fields_list(List format_list)
 
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
 		if(!strncasecmp("Account", object, 1)) {
 			field->type = PRINT_JOB_ACCOUNT;
@@ -373,12 +385,7 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Cluster");
 			field->len = 9;
 			field->print_routine = print_fields_str;
-		} else if(!strncasecmp("Count", object, 2)) {
-			field->type = PRINT_JOB_COUNT;
-			field->name = xstrdup("Job Count");
-			field->len = 9;
-			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("cpu_count", object, 2)) {
+		} else if(!strncasecmp("CpuCount", object, 2)) {
 			field->type = PRINT_JOB_CPUS;
 			field->name = xstrdup("CPU Count");
 			field->len = 9;
@@ -388,7 +395,12 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Duration");
 			field->len = 12;
 			field->print_routine = print_fields_time;
-		} else if(!strncasecmp("node_count", object, 2)) {
+		} else if(!strncasecmp("JobCount", object, 2)) {
+			field->type = PRINT_JOB_COUNT;
+			field->name = xstrdup("Job Count");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("NodeCount", object, 2)) {
 			field->type = PRINT_JOB_NODES;
 			field->name = xstrdup("Node Count");
 			field->len = 9;
@@ -404,6 +416,11 @@ static int _setup_print_fields_list(List format_list)
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
@@ -416,8 +433,10 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 	ListIterator itr = NULL;
 	print_field_t *field = NULL;
 	char *object = NULL;
+	char *last_object = NULL;
 	uint32_t last_size = 0;
 	uint32_t size = 0;
+	char *tmp_char = NULL;
 
 	if(!grouping_list || !list_count(grouping_list)) {
 		exit_code=1;
@@ -433,22 +452,55 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 	while((object = list_next(itr))) {
 		field = xmalloc(sizeof(print_field_t));
 		size = atoi(object);
-
-		field->type = PRINT_JOB_SIZE;
+		if(print_job_count)
+			field->type = PRINT_JOB_COUNT;
+		else
+			field->type = PRINT_JOB_SIZE;
 		field->name = xstrdup_printf("%u-%u cpus", last_size, size-1);
-		field->len = 13;
-		field->print_routine = sreport_print_time;
+		if(time_format == SREPORT_TIME_SECS_PER
+		   || time_format == SREPORT_TIME_MINS_PER
+		   || time_format == SREPORT_TIME_HOURS_PER)
+			field->len = 20;
+		else
+			field->len = 13;
+
+		if(print_job_count)
+			field->print_routine = print_fields_uint;
+		else
+			field->print_routine = sreport_print_time;
 		last_size = size;
+		last_object = object;
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(grouping_print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
 
 	if(last_size) {
 		field = xmalloc(sizeof(print_field_t));
-		field->type = PRINT_JOB_SIZE;
+		if(print_job_count)
+			field->type = PRINT_JOB_COUNT;
+		else
+			field->type = PRINT_JOB_SIZE;
 		field->name = xstrdup_printf("> %u cpus", last_size);
-		field->len = 13;
-		field->print_routine = sreport_print_time;
+		if(time_format == SREPORT_TIME_SECS_PER
+		   || time_format == SREPORT_TIME_MINS_PER
+		   || time_format == SREPORT_TIME_HOURS_PER)
+			field->len = 20;
+		else
+			field->len = 13;
+		if(print_job_count)
+			field->print_routine = print_fields_uint;
+		else
+			field->print_routine = sreport_print_time;
+		if((tmp_char = strstr(last_object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(grouping_print_fields_list, field);		
 	}
 
@@ -479,10 +531,14 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 	print_field_t *field = NULL;
 	print_field_t total_field;
 	uint32_t total_time = 0;
+	sreport_time_format_t temp_format;
+	
 	List job_list = NULL;
 	List cluster_list = NULL;
 	List assoc_list = NULL;
 
+	List tmp_acct_list = NULL;
+
 	List format_list = list_create(slurm_destroy_char);
 	List grouping_list = list_create(slurm_destroy_char);
 
@@ -492,9 +548,10 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 
 	print_fields_list = list_create(destroy_print_field);
 
-	_set_cond(&i, argc, argv, job_cond, NULL, grouping_list);
-
-	slurm_addto_char_list(format_list, "Cl,a");
+	_set_cond(&i, argc, argv, job_cond, format_list, grouping_list);
+	
+	if(!list_count(format_list))
+		slurm_addto_char_list(format_list, "Cl,a");
 
 	if(!list_count(grouping_list)) 
 		slurm_addto_char_list(grouping_list, "50,250,500,1000");
@@ -504,7 +561,16 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 
 	_setup_grouping_print_fields_list(grouping_list);
 
+	/* we don't want to actually query by accounts in the jobs
+	   here since we may be looking for sub accounts of a specific
+	   account.
+	*/
+	tmp_acct_list = job_cond->acct_list;
+	job_cond->acct_list = NULL;
 	job_list = jobacct_storage_g_get_jobs_cond(db_conn, my_uid, job_cond);
+	job_cond->acct_list = tmp_acct_list;
+	tmp_acct_list = NULL;
+
 	if(!job_list) {
 		exit_code=1;
 		fprintf(stderr, " Problem with job query.\n");
@@ -512,11 +578,15 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 	}
 
 	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
-	assoc_cond.acct_list = job_cond->acct_list;
 	assoc_cond.id_list = job_cond->associd_list;
 	assoc_cond.cluster_list = job_cond->cluster_list;
 	assoc_cond.partition_list = job_cond->partition_list;
-	assoc_cond.parent_acct = "root";
+	if(!job_cond->acct_list || !list_count(job_cond->acct_list)) {
+		job_cond->acct_list = list_create(NULL);
+		list_append(job_cond->acct_list, "root");
+	}
+	assoc_cond.parent_acct_list = job_cond->acct_list;	
+	
 
 	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
 						     &assoc_cond);
@@ -524,17 +594,20 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
+		time_t my_start = job_cond->usage_start;
 		time_t my_end = job_cond->usage_end-1;
 
-		slurm_make_time_str((time_t *)&job_cond->usage_start, 
-				    start_char, sizeof(start_char));
-		slurm_make_time_str(&my_end,
-				    end_char, sizeof(end_char));
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 		printf("Job Sizes %s - %s (%d secs)\n", 
 		       start_char, end_char, 
 		       (job_cond->usage_end - job_cond->usage_start));
+		if(print_job_count)
+			printf("Units are in number of jobs run\n");
+		else
+			printf("Time reported in %s\n", time_format_string);
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 	}
@@ -610,7 +683,7 @@ no_assocs:
 
 	memset(&total_field, 0, sizeof(print_field_t));
 	total_field.type = PRINT_JOB_SIZE;
-	total_field.name = xstrdup("% of Cluster");
+	total_field.name = xstrdup("% of cluster");
 	total_field.len = 12;
 	total_field.print_routine = sreport_print_time;
 	list_append(header_list, &total_field);
@@ -621,7 +694,6 @@ no_assocs:
 	while((job = list_next(itr))) {
 		char *local_cluster = "UNKNOWN";
 		char *local_account = "UNKNOWN";
-		char *group;
 
 		if(!job->elapsed) {
 			/* here we don't care about jobs that didn't
@@ -633,17 +705,22 @@ no_assocs:
 		if(job->account) 
 			local_account = job->account;
 
+		list_iterator_reset(cluster_itr);
 		while((cluster_group = list_next(cluster_itr))) {
 			if(!strcmp(local_cluster, cluster_group->cluster)) 
 				break;
 		}
 		if(!cluster_group) {
-			cluster_group = 
-				xmalloc(sizeof(cluster_grouping_t));
-			cluster_group->cluster = xstrdup(local_cluster);
-			cluster_group->acct_list =
-				list_create(_destroy_acct_grouping);
-			list_append(cluster_list, cluster_group);
+			/* here we are only looking for groups that
+			 * were added with the associations above
+			 */
+			continue;
+/* 			cluster_group =  */
+/* 				xmalloc(sizeof(cluster_grouping_t)); */
+/* 			cluster_group->cluster = xstrdup(local_cluster); */
+/* 			cluster_group->acct_list = */
+/* 				list_create(_destroy_acct_grouping); */
+/* 			list_append(cluster_list, cluster_group); */
 		}
 
 		acct_itr = list_iterator_create(cluster_group->acct_list);
@@ -662,29 +739,34 @@ no_assocs:
 		list_iterator_destroy(acct_itr);		
 			
 		if(!acct_group) {
-			uint32_t last_size = 0;
-			acct_group = xmalloc(sizeof(acct_grouping_t));
-			acct_group->acct = xstrdup(local_account);
-			acct_group->groups =
-				list_create(_destroy_local_grouping);
-			list_append(cluster_group->acct_list, acct_group);
-
-			while((group = list_next(group_itr))) {
-				local_group = xmalloc(sizeof(local_grouping_t));
-				local_group->jobs = list_create(NULL);
-				local_group->min_size = last_size;
-				last_size = atoi(group);
-				local_group->max_size = last_size-1;
-				list_append(acct_group->groups, local_group);
-			}
-			if(last_size) {
-				local_group = xmalloc(sizeof(local_grouping_t));
-				local_group->jobs = list_create(NULL);
-				local_group->min_size = last_size;
-				local_group->max_size = INFINITE;
-				list_append(acct_group->groups, local_group);
-			}
-			list_iterator_reset(group_itr);
+			//char *group = NULL;
+			//uint32_t last_size = 0;
+			/* here we are only looking for groups that
+			 * were added with the associations above
+			 */
+			continue;
+/* 			acct_group = xmalloc(sizeof(acct_grouping_t)); */
+/* 			acct_group->acct = xstrdup(local_account); */
+/* 			acct_group->groups = */
+/* 				list_create(_destroy_local_grouping); */
+/* 			list_append(cluster_group->acct_list, acct_group); */
+
+/* 			while((group = list_next(group_itr))) { */
+/* 				local_group = xmalloc(sizeof(local_grouping_t)); */
+/* 				local_group->jobs = list_create(NULL); */
+/* 				local_group->min_size = last_size; */
+/* 				last_size = atoi(group); */
+/* 				local_group->max_size = last_size-1; */
+/* 				list_append(acct_group->groups, local_group); */
+/* 			} */
+/* 			if(last_size) { */
+/* 				local_group = xmalloc(sizeof(local_grouping_t)); */
+/* 				local_group->jobs = list_create(NULL); */
+/* 				local_group->min_size = last_size; */
+/* 				local_group->max_size = INFINITE; */
+/* 				list_append(acct_group->groups, local_group); */
+/* 			} */
+/* 			list_iterator_reset(group_itr); */
 		}
 
 		local_itr = list_iterator_create(acct_group->groups);
@@ -695,14 +777,13 @@ no_assocs:
 				continue;
 			list_append(local_group->jobs, job);
 			local_group->count++;
-			total_secs = job->elapsed*job->alloc_cpus;
+			total_secs = (uint64_t)job->elapsed 
+				* (uint64_t)job->alloc_cpus;
 			local_group->cpu_secs += total_secs;
 			acct_group->cpu_secs += total_secs;
 			cluster_group->cpu_secs += total_secs;
 		}
 		list_iterator_destroy(local_itr);		
-
-		list_iterator_reset(cluster_itr);
 	}
 	list_iterator_destroy(group_itr);
 	list_destroy(grouping_list);
@@ -712,6 +793,7 @@ no_assocs:
 	
 	itr = list_iterator_create(print_fields_list);
 	itr2 = list_iterator_create(grouping_print_fields_list);
+	list_iterator_reset(cluster_itr);
 	while((cluster_group = list_next(cluster_itr))) {
 		acct_itr = list_iterator_create(cluster_group->acct_list);
 		while((acct_group = list_next(acct_itr))) {
@@ -729,6 +811,9 @@ no_assocs:
 							     0);
 					break;
 				default:
+					field->print_routine(field,
+							     NULL,
+							     0);
 					break;
 				}
 			}
@@ -741,18 +826,31 @@ no_assocs:
 					field->print_routine(
 						field,
 						local_group->cpu_secs,
-						acct_group->cpu_secs);
+						acct_group->cpu_secs,
+						0);
+					break;
+				case PRINT_JOB_COUNT:
+					field->print_routine(
+						field,
+						local_group->count,
+						0);
 					break;
 				default:
+					field->print_routine(field,
+							     NULL,
+							     0);
 					break;
 				}
 			}
 			list_iterator_reset(itr2);
 			list_iterator_destroy(local_itr);
+			
+			temp_format = time_format;
+			time_format = SREPORT_TIME_PERCENT;
 			total_field.print_routine(&total_field,
 						  acct_group->cpu_secs,
 						  cluster_group->cpu_secs, 1);
-			
+			time_format = temp_format;
 			printf("\n");
 		}
 		list_iterator_destroy(acct_itr);
@@ -762,6 +860,8 @@ no_assocs:
 //	time_format = temp_time_format;
 
 end_it:
+	if(print_job_count)
+		print_job_count = 0;
 
 	destroy_acct_job_cond(job_cond);
 	
diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c
index 74e7153e8..84793d6bf 100644
--- a/src/sreport/sreport.c
+++ b/src/sreport/sreport.c
@@ -52,9 +52,11 @@ int exit_flag;		/* program to terminate if =1 */
 int input_words;	/* number of words of input permitted */
 int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
 int all_clusters_flag = 0;
-sreport_time_format_t time_format = SREPORT_TIME_SECS;
+sreport_time_format_t time_format = SREPORT_TIME_MINS;
+char *time_format_string = "Minutes";
 void *db_conn = NULL;
 uint32_t my_uid = 0;
+sreport_sort_t sort_flag = SREPORT_SORT_TIME;
 
 static void	_job_rep (int argc, char *argv[]);
 static void	_user_rep (int argc, char *argv[]);
@@ -64,6 +66,7 @@ static int	_get_command (int *argc, char *argv[]);
 static void     _print_version( void );
 static int	_process_command (int argc, char *argv[]);
 static int      _set_time_format(char *format);
+static int      _set_sort(char *format);
 static void	_usage ();
 
 int 
@@ -82,6 +85,7 @@ main (int argc, char *argv[])
 		{"parsable", 0, 0, 'p'},
 		{"parsable2", 0, 0, 'P'},
 		{"quiet",    0, 0, 'q'},
+		{"sort",    0, 0, 's'},
 		{"usage",    0, 0, 'h'},
 		{"verbose",  0, 0, 'v'},
 		{"version",  0, 0, 'V'},
@@ -95,7 +99,7 @@ main (int argc, char *argv[])
 	quiet_flag        = 0;
 	log_init("sreport", opts, SYSLOG_FACILITY_DAEMON, NULL);
 
-	while((opt_char = getopt_long(argc, argv, "ahnpPqt:vV",
+	while((opt_char = getopt_long(argc, argv, "ahnpPqs:t:vV",
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
@@ -124,6 +128,9 @@ main (int argc, char *argv[])
 		case (int)'q':
 			quiet_flag = 1;
 			break;
+		case (int)'s':
+			_set_sort(optarg);
+			break;
 		case (int)'t':
 			_set_time_format(optarg);
 			break;
@@ -153,7 +160,7 @@ main (int argc, char *argv[])
 		}	
 	}
 
-	db_conn = acct_storage_g_get_connection(false, false);
+	db_conn = acct_storage_g_get_connection(false, 0, false);
 	my_uid = getuid();
 
 	if (input_field_count)
@@ -253,13 +260,18 @@ static void _cluster_rep (int argc, char *argv[])
 {
 	int error_code = SLURM_SUCCESS;
 
-	if (strncasecmp (argv[0], "Utilization", 1) == 0) {
+	if (strncasecmp (argv[0], "AccountUtilizationByUser", 1) == 0) {
+		error_code = cluster_account_by_user((argc - 1), &argv[1]);
+	} else if (strncasecmp (argv[0], "UserUtilizationByAccount", 2) == 0) {
+		error_code = cluster_user_by_account((argc - 1), &argv[1]);
+	} else if (strncasecmp (argv[0], "Utilization", 2) == 0) {
 		error_code = cluster_utilization((argc - 1), &argv[1]);
 	} else {
 		exit_code = 1;
 		fprintf(stderr, "Not valid report %s\n", argv[0]);
 		fprintf(stderr, "Valid cluster reports are, ");
-		fprintf(stderr, "\"Utilization\"\n");
+		fprintf(stderr, "\"AccountUtilizationByUser\", "
+			"\"UserUtilizationByAccount\", and \"Utilization\"\n");
 	}
 	
 	if (error_code) {
@@ -433,6 +445,14 @@ _process_command (int argc, char *argv[])
 				 argv[0]);
 		}
 		exit_flag = 1;
+	} else if (strncasecmp (argv[0], "sort", 1) == 0) {
+		if (argc < 2) {
+			exit_code = 1;
+			fprintf (stderr,
+				 "too few arguments for keyword:%s\n",
+				 argv[0]);
+		} else		
+			_set_sort(argv[1]);
 	} else if (strncasecmp (argv[0], "time", 1) == 0) {
 		if (argc < 2) {
 			exit_code = 1;
@@ -478,10 +498,25 @@ static int _set_time_format(char *format)
 {
 	if (strncasecmp (format, "SecPer", 6) == 0) {
 		time_format = SREPORT_TIME_SECS_PER;
-	} else if (strncasecmp (format, "Sec", 1) == 0) {
+		time_format_string = "Seconds/Percentage of Total";
+	} else if (strncasecmp (format, "MinPer", 6) == 0) {
+		time_format = SREPORT_TIME_MINS_PER;
+		time_format_string = "Minutes/Percentage of Total";
+	} else if (strncasecmp (format, "HourPer", 6) == 0) {
+		time_format = SREPORT_TIME_HOURS_PER;
+		time_format_string = "Hours/Percentage of Total";
+	} else if (strncasecmp (format, "Seconds", 1) == 0) {
 		time_format = SREPORT_TIME_SECS;
+		time_format_string = "Seconds";
+	} else if (strncasecmp (format, "Minutes", 1) == 0) {
+		time_format = SREPORT_TIME_MINS;
+		time_format_string = "Minutes";
+	} else if (strncasecmp (format, "Hours", 1) == 0) {
+		time_format = SREPORT_TIME_HOURS;
+		time_format_string = "Hours";
 	} else if (strncasecmp (format, "Percent", 1) == 0) {
 		time_format = SREPORT_TIME_PERCENT;
+		time_format_string = "Percentage of Total";
 	} else {
 		fprintf (stderr, "unknown time format %s", format);	
 		return SLURM_ERROR;
@@ -490,6 +525,20 @@ static int _set_time_format(char *format)
 	return SLURM_SUCCESS;
 }
 
+static int _set_sort(char *format)
+{
+	if (strncasecmp (format, "Name", 1) == 0) {
+		sort_flag = SREPORT_SORT_NAME;
+	} else if (strncasecmp (format, "Time", 1) == 0) {
+		sort_flag = SREPORT_SORT_TIME;
+	} else {
+		fprintf (stderr, "unknown sort format %s", format);
+		return SLURM_ERROR;
+	}
+
+	return SLURM_SUCCESS;
+}
+
 
 /* _usage - show the valid sreport commands */
 void _usage () {
@@ -499,9 +548,10 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
      -a or --all_clusters: Use all clusters instead of current             \n\
      -h or --help: equivalent to \"help\" command                          \n\
      -n or --no_header: equivalent to \"no_header\" command                \n\
-     -q or --quiet: equivalent to \"quiet\" command                        \n\
      -p or --parsable: output will be '|' delimited with a '|' at the end  \n\
      -P or --parsable2: output will be '|' delimited without a '|' at the end\n\
+     -q or --quiet: equivalent to \"quiet\" command                        \n\
+     -t <time_format>: Second, Minute, Hour, Percent, SecPer, MinPer, HourPer\n\
      -v or --verbose: equivalent to \"verbose\" command                    \n\
      -V or --version: equivalent to \"version\" command                    \n\
                                                                            \n\
@@ -510,15 +560,16 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
   terminated.                                                              \n\
                                                                            \n\
     Valid <COMMAND> values are:                                            \n\
-     exit                     terminate sreport                            \n\
-     help                     print this description of use.               \n\
-     parsable                 output will be | delimited with an ending '|'\n\
-     parsable2                output will be | delimited without an ending '|'\n\
-    quiet                    print no messages other than error messages. \n\
-     quit                     terminate this command.                      \n\
-     verbose                  enable detailed logging.                     \n\
-     version                  display tool version number.                 \n\
-     !!                       Repeat the last command entered.             \n\
+     exit                Terminate sreport                                 \n\
+     help                Print this description of use.                    \n\
+     parsable            Output will be | delimited with an ending '|'     \n\
+     parsable2           Output will be | delimited without an ending '|'  \n\
+     quiet               Print no messages other than error messages.      \n\
+     quit                Terminate this command.                           \n\
+     time <time_format>  Second, Minute, Hour, Percent, SecPer, MinPer, HourPer\n\
+     verbose             Enable detailed logging.                          \n\
+     version             Display tool version number.                      \n\
+     !!                  Repeat the last command entered.                  \n\
                                                                            \n\
     Valid report types are:                                                \n\
      cluster <REPORT> <OPTIONS>                                            \n\
@@ -526,7 +577,7 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
      user <REPORT> <OPTIONS>                                               \n\
                                                                            \n\
   <REPORT> is different for each report type.                              \n\
-     cluster - Utilization                                                 \n\
+     cluster - AccountUtilizationByUser, UserUtilizationByAccount, Utilization\n\
      job     - Sizes                                                       \n\
      user    - TopUsage                                                    \n\
                                                                            \n\
@@ -544,6 +595,9 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                                                            \n\
      cluster - Names=<OPT>      - List of clusters to include in report    \n\
                                   Default is local cluster.                \n\
+             - Tree             - When used with the AccountUtilizationByUser\n\
+                                  report will span the accounts as they are\n\
+                                  in the hierarchy.                        \n\
                                                                            \n\
      job     - Accounts=<OPT>   - List of accounts to use for the report   \n\
                                   Default is all.                          \n\
@@ -558,10 +612,14 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                   Default is all.                          \n\
              - Partitions=<OPT> - List of partitions jobs ran on to include\n\
                                   in report.  Default is all.              \n\
+             - PrintJobCount    - When used with the Sizes report will print\n\
+                                  number of jobs run instead of time used. \n\
              - Users=<OPT>      - List of users jobs to include in report. \n\
                                   Default is all.                          \n\
                                                                            \n\
-     user    - Clusters=<OPT>   - List of clusters to include in report.   \n\
+     user    - Accounts=<OPT>   - List of accounts to use for the report   \n\
+                                  Default is all.                          \n\
+             - Clusters=<OPT>   - List of clusters to include in report.   \n\
                                   Default is local cluster.                \n\
              - Group            - Group all accounts together for each user.\n\
                                   Default is a separate entry for each user\n\
diff --git a/src/sreport/sreport.h b/src/sreport/sreport.h
index bd450fb78..05ff40cfe 100644
--- a/src/sreport/sreport.h
+++ b/src/sreport/sreport.h
@@ -88,11 +88,46 @@
 
 typedef enum {
 	SREPORT_TIME_SECS,
+	SREPORT_TIME_MINS,
+	SREPORT_TIME_HOURS,
 	SREPORT_TIME_PERCENT,
 	SREPORT_TIME_SECS_PER,
+	SREPORT_TIME_MINS_PER,
+	SREPORT_TIME_HOURS_PER,
 } sreport_time_format_t;
 
+typedef enum {
+	SREPORT_SORT_TIME,
+	SREPORT_SORT_NAME
+} sreport_sort_t;
+
+typedef struct {
+	char *acct;
+	char *cluster;
+	uint64_t cpu_secs;
+	char *parent_acct;
+	char *user;
+} sreport_assoc_rec_t;
+
+typedef struct {
+	char *acct;
+	List acct_list; /* list of char *'s */
+	List assoc_list; /* list of acct_association_rec_t's */
+	uint64_t cpu_secs;
+	char *name;
+	uid_t uid;
+} sreport_user_rec_t;
+
+typedef struct {
+	List assoc_list; /* list of sreport_assoc_rec_t *'s */
+	uint32_t cpu_count;
+	uint64_t cpu_secs;
+	char *name;
+	List user_list; /* list of sreport_user_rec_t *'s */
+} sreport_cluster_rec_t;
+
 extern sreport_time_format_t time_format;
+extern char *time_format_string;
 extern char *command_name;
 extern int exit_code;	/* sacctmgr's exit code, =1 on any error at any time */
 extern int exit_flag;	/* program to terminate if =1 */
@@ -101,11 +136,22 @@ extern int quiet_flag;	/* quiet=1, verbose=-1, normal=0 */
 extern void *db_conn;
 extern uint32_t my_uid;
 extern int all_clusters_flag;
+extern sreport_sort_t sort_flag;
 
 extern void sreport_print_time(print_field_t *field,
 			       uint64_t value, uint64_t total_time, int last);
 extern int parse_option_end(char *option);
 extern char *strip_quotes(char *option, int *increased);
 extern int set_start_end_time(time_t *start, time_t *end);
+extern void destroy_sreport_assoc_rec(void *object);
+extern void destroy_sreport_user_rec(void *object);
+extern void destroy_sreport_cluster_rec(void *object);
+extern int sort_user_dec(sreport_user_rec_t *user_a,
+			 sreport_user_rec_t *user_b);
+extern int sort_cluster_dec(sreport_cluster_rec_t *cluster_a,
+			    sreport_cluster_rec_t *cluster_b);
+extern int sort_assoc_dec(sreport_assoc_rec_t *assoc_a,
+			  sreport_assoc_rec_t *assoc_b);
+
 
 #endif /* HAVE_SREPORT_H */
diff --git a/src/sreport/user_reports.c b/src/sreport/user_reports.c
index 168a631ab..aeb7fbd71 100644
--- a/src/sreport/user_reports.c
+++ b/src/sreport/user_reports.c
@@ -44,77 +44,13 @@ enum {
 	PRINT_USER_CLUSTER,
 	PRINT_USER_LOGIN,
 	PRINT_USER_PROPER,
-	PRINT_USER_USED
+	PRINT_USER_USED,
 };
 
-
-typedef struct {
-	List acct_list; /* list of char *'s */
-	uint64_t cpu_secs;
-	char *name;
-	uid_t uid;
-} local_user_rec_t;
-
-typedef struct {
-	uint64_t cpu_secs;
-	char *name;
-	List user_list; /* list of local_user_rec_t *'s */
-} local_cluster_rec_t;
-
 static List print_fields_list = NULL; /* types are of print_field_t */
 static bool group_accts = false;
 static int top_limit = 10;
 
-static void _destroy_local_user_rec(void *object)
-{
-	local_user_rec_t *local_user = (local_user_rec_t *)object;
-	if(local_user) {
-		if(local_user->acct_list)
-			list_destroy(local_user->acct_list);
-		xfree(local_user);
-	}
-}
-
-static void _destroy_local_cluster_rec(void *object)
-{
-	local_cluster_rec_t *local_cluster = (local_cluster_rec_t *)object;
-	if(local_cluster) {
-		xfree(local_cluster->name);
-		if(local_cluster->user_list)
-			list_destroy(local_cluster->user_list);
-		xfree(local_cluster);
-	}
-}
-
-/* 
- * Comparator used for sorting users largest cpu to smallest cpu
- * 
- * returns: -1: user_a > user_b   0: user_a == user_b   1: user_a < user_b
- * 
- */
-static int _sort_user_dec(local_user_rec_t *user_a, local_user_rec_t *user_b)
-{
-	int diff = 0;
-
-	if (user_a->cpu_secs > user_b->cpu_secs)
-		return -1;
-	else if (user_a->cpu_secs < user_b->cpu_secs)
-		return 1;
-
-	if(!user_a->name || !user_b->name)
-		return 0;
-
-	diff = strcmp(user_a->name, user_b->name);
-
-	if (diff > 0)
-		return -1;
-	else if (diff < 0)
-		return 1;
-	
-	return 0;
-}
-
-
 static int _set_cond(int *start, int argc, char *argv[],
 		     acct_user_cond_t *user_cond, List format_list)
 {
@@ -123,6 +59,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int end = 0;
 	int local_cluster_flag = all_clusters_flag;
 	acct_association_cond_t *assoc_cond = NULL;
+	time_t start_time, end_time;
 	
 	if(!user_cond) {
 		error("We need an acct_user_cond to call this");
@@ -140,7 +77,6 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 	if(!assoc_cond->cluster_list)
 		assoc_cond->cluster_list = list_create(slurm_destroy_char);
-
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!strncasecmp (argv[i], "Set", 3)) {
@@ -159,32 +95,32 @@ static int _set_cond(int *start, int argc, char *argv[],
 				assoc_cond->user_list = 
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(assoc_cond->user_list,
-					      argv[i]);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Accounts", 2)) {
 			if(!assoc_cond->acct_list)
 				assoc_cond->acct_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(assoc_cond->acct_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Clusters", 1)) {
 			slurm_addto_char_list(assoc_cond->cluster_list,
-					argv[i]+end);
+					      argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", 1)) {
-			assoc_cond->usage_end = parse_time(argv[i]+end);
+			assoc_cond->usage_end = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format", 1)) {
 			if(format_list)
 				slurm_addto_char_list(format_list, argv[i]+end);
 		} else if (!strncasecmp (argv[i], "Start", 1)) {
-			assoc_cond->usage_start = parse_time(argv[i]+end);
+			assoc_cond->usage_start = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else {
 			exit_code=1;
 			fprintf(stderr, " Unknown condition: %s\n"
-			       "Use keyword set to modify value\n", argv[i]);
+				"Use keyword set to modify value\n", argv[i]);
 		}
 	}
 	(*start) = i;
@@ -195,8 +131,15 @@ static int _set_cond(int *start, int argc, char *argv[],
 			list_append(assoc_cond->cluster_list, temp);
 	}
 
-	set_start_end_time((time_t *)&assoc_cond->usage_start,
-			   (time_t *)&assoc_cond->usage_end);
+	/* Copy through local time_t variables on some systems to make
+	   sure assoc_cond isn't messed up.  This has happened on some 64
+	   bit machines and this is here to be on the safe side.
+	*/
+	start_time = assoc_cond->usage_start;
+	end_time = assoc_cond->usage_end;
+	set_start_end_time(&start_time, &end_time);
+	assoc_cond->usage_start = start_time;
+	assoc_cond->usage_end = end_time;
 
 	return set;
 }
@@ -219,10 +162,11 @@ static int _setup_print_fields_list(List format_list)
 
 	itr = list_iterator_create(format_list);
 	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
 		field = xmalloc(sizeof(print_field_t));
 		if(!strncasecmp("Accounts", object, 1)) {
 			field->type = PRINT_USER_ACCT;
-			field->name = xstrdup("Account(s)");
+			field->name = xstrdup("Account");
 			field->len = 15;
 			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Cluster", object, 1)) {
@@ -243,7 +187,9 @@ static int _setup_print_fields_list(List format_list)
 		} else if(!strncasecmp("Used", object, 1)) {
 			field->type = PRINT_USER_USED;
 			field->name = xstrdup("Used");
-			if(time_format == SREPORT_TIME_SECS_PER)
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
 				field->len = 18;
 			else
 				field->len = 10;
@@ -254,6 +200,11 @@ static int _setup_print_fields_list(List format_list)
 			xfree(field);
 			continue;
 		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen > 0) 
+				field->len = newlen;
+		}
 		list_append(print_fields_list, field);		
 	}
 	list_iterator_destroy(itr);
@@ -271,16 +222,15 @@ extern int user_top(int argc, char *argv[])
 	ListIterator cluster_itr = NULL;
 	List format_list = list_create(slurm_destroy_char);
 	List user_list = NULL;
-	List cluster_list = list_create(_destroy_local_cluster_rec);
+	List cluster_list = list_create(destroy_sreport_cluster_rec);
 	char *object = NULL;
 
 	int i=0;
-	uint32_t total_time = 0;
 	acct_user_rec_t *user = NULL;
 	acct_association_rec_t *assoc = NULL;
 	acct_accounting_rec_t *assoc_acct = NULL;
-	local_user_rec_t *local_user = NULL;
-	local_cluster_rec_t *local_cluster = NULL;
+	sreport_user_rec_t *sreport_user = NULL;
+	sreport_cluster_rec_t *sreport_cluster = NULL;
 	print_field_t *field = NULL;
 	int field_count = 0;
 
@@ -304,24 +254,29 @@ extern int user_top(int argc, char *argv[])
 	if(print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
+		time_t my_start = user_cond->assoc_cond->usage_start;
 		time_t my_end = user_cond->assoc_cond->usage_end-1;
 
-		slurm_make_time_str(
-			(time_t *)&user_cond->assoc_cond->usage_start, 
-			start_char, sizeof(start_char));
-		slurm_make_time_str(&my_end,
-				    end_char, sizeof(end_char));
+		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 		printf("Top %u Users %s - %s (%d secs)\n", 
 		       top_limit, start_char, end_char, 
 		       (user_cond->assoc_cond->usage_end 
 			- user_cond->assoc_cond->usage_start));
+		
+		switch(time_format) {
+		case SREPORT_TIME_PERCENT:
+			printf("Time reported in %s\n", time_format_string);
+			break; 
+		default:
+			printf("Time reported in CPU %s\n", time_format_string);
+			break;
+		}
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 	}
-	total_time = user_cond->assoc_cond->usage_end 
-		- user_cond->assoc_cond->usage_start;
 
 	itr = list_iterator_create(user_list);
 	cluster_itr = list_iterator_create(cluster_list);
@@ -343,63 +298,63 @@ extern int user_top(int argc, char *argv[])
 			   || !list_count(assoc->accounting_list))
 				continue;
 			
-			while((local_cluster = list_next(cluster_itr))) {
-				if(!strcmp(local_cluster->name, 
+			while((sreport_cluster = list_next(cluster_itr))) {
+				if(!strcmp(sreport_cluster->name, 
 					   assoc->cluster)) {
 					ListIterator user_itr = NULL;
 					if(!group_accts) {
-						local_user = NULL;
+						sreport_user = NULL;
 						goto new_user;
 					}
 					user_itr = list_iterator_create
-						(local_cluster->user_list); 
-					while((local_user 
+						(sreport_cluster->user_list); 
+					while((sreport_user 
 					       = list_next(user_itr))) {
-						if(local_user->uid 
+						if(sreport_user->uid 
 						   == user->uid) {
 							break;
 						}
 					}
 					list_iterator_destroy(user_itr);
 				new_user:
-					if(!local_user) {
-						local_user = xmalloc(
+					if(!sreport_user) {
+						sreport_user = xmalloc(
 							sizeof
-							(local_user_rec_t));
-						local_user->name =
+							(sreport_user_rec_t));
+						sreport_user->name =
 							xstrdup(assoc->user);
-						local_user->uid =
+						sreport_user->uid =
 							user->uid;
-						local_user->acct_list =
+						sreport_user->acct_list =
 							list_create
 							(slurm_destroy_char);
-						list_append(local_cluster->
+						list_append(sreport_cluster->
 							    user_list, 
-							    local_user);
+							    sreport_user);
 					}
 					break;
 				}
 			}
-			if(!local_cluster) {
-				local_cluster = 
-					xmalloc(sizeof(local_cluster_rec_t));
-				list_append(cluster_list, local_cluster);
-
-				local_cluster->name = xstrdup(assoc->cluster);
-				local_cluster->user_list = 
-					list_create(_destroy_local_user_rec);
-				local_user = 
-					xmalloc(sizeof(local_user_rec_t));
-				local_user->name = xstrdup(assoc->user);
-				local_user->uid = user->uid;
-				local_user->acct_list = 
+			if(!sreport_cluster) {
+				sreport_cluster = 
+					xmalloc(sizeof(sreport_cluster_rec_t));
+				list_append(cluster_list, sreport_cluster);
+
+				sreport_cluster->name = xstrdup(assoc->cluster);
+				sreport_cluster->user_list = 
+					list_create(destroy_sreport_user_rec);
+				sreport_user = 
+					xmalloc(sizeof(sreport_user_rec_t));
+				sreport_user->name = xstrdup(assoc->user);
+				sreport_user->uid = user->uid;
+				sreport_user->acct_list = 
 					list_create(slurm_destroy_char);
-				list_append(local_cluster->user_list, 
-					    local_user);
+				list_append(sreport_cluster->user_list, 
+					    sreport_user);
 			}
 			list_iterator_reset(cluster_itr);
 
-			itr3 = list_iterator_create(local_user->acct_list);
+			itr3 = list_iterator_create(sreport_user->acct_list);
 			while((object = list_next(itr3))) {
 				if(!strcmp(object, assoc->acct))
 					break;
@@ -407,13 +362,14 @@ extern int user_top(int argc, char *argv[])
 			list_iterator_destroy(itr3);
 
 			if(!object)
-				list_append(local_user->acct_list, 
+				list_append(sreport_user->acct_list, 
 					    xstrdup(assoc->acct));
 			itr3 = list_iterator_create(assoc->accounting_list);
 			while((assoc_acct = list_next(itr3))) {
-				local_user->cpu_secs += assoc_acct->alloc_secs;
-				local_cluster->cpu_secs += 
-					assoc_acct->alloc_secs;
+				sreport_user->cpu_secs += 
+					(uint64_t)assoc_acct->alloc_secs;
+				sreport_cluster->cpu_secs += 
+					(uint64_t)assoc_acct->alloc_secs;
 			}
 			list_iterator_destroy(itr3);
 		}
@@ -427,11 +383,11 @@ extern int user_top(int argc, char *argv[])
 	field_count = list_count(print_fields_list);
 
 	list_iterator_reset(cluster_itr);
-	while((local_cluster = list_next(cluster_itr))) {
-		list_sort(local_cluster->user_list, (ListCmpF)_sort_user_dec);
+	while((sreport_cluster = list_next(cluster_itr))) {
+		list_sort(sreport_cluster->user_list, (ListCmpF)sort_user_dec);
 	
-		itr = list_iterator_create(local_cluster->user_list);
-		while((local_user = list_next(itr))) {
+		itr = list_iterator_create(sreport_cluster->user_list);
+		while((sreport_user = list_next(itr))) {
 			int count = 0;
 			int curr_inx = 1;
 			while((field = list_next(itr2))) {
@@ -440,7 +396,7 @@ extern int user_top(int argc, char *argv[])
 				switch(field->type) {
 				case PRINT_USER_ACCT:
 					itr3 = list_iterator_create(
-						local_user->acct_list);
+						sreport_user->acct_list);
 					while((object = list_next(itr3))) {
 						if(tmp_char)
 							xstrfmtcat(tmp_char,
@@ -460,17 +416,17 @@ extern int user_top(int argc, char *argv[])
 				case PRINT_USER_CLUSTER:
 					field->print_routine(
 						field,
-						local_cluster->name,
+						sreport_cluster->name,
 						(curr_inx == field_count));
 					break;
 				case PRINT_USER_LOGIN:
 					field->print_routine(field,
-							     local_user->name,
+							     sreport_user->name,
 							     (curr_inx == 
 							      field_count));
 					break;
 				case PRINT_USER_PROPER:
-					pwd = getpwnam(local_user->name);
+					pwd = getpwnam(sreport_user->name);
 					if(pwd) {
 						tmp_char = strtok(pwd->pw_gecos,
 								  ",");
@@ -486,11 +442,14 @@ extern int user_top(int argc, char *argv[])
 				case PRINT_USER_USED:
 					field->print_routine(
 						field,
-						local_user->cpu_secs,
-						local_cluster->cpu_secs,
+						sreport_user->cpu_secs,
+						sreport_cluster->cpu_secs,
 						(curr_inx == field_count));
 					break;
 				default:
+					field->print_routine(
+						field, NULL,
+						(curr_inx == field_count));
 					break;
 				}
 				curr_inx++;
diff --git a/src/srun/allocate.c b/src/srun/allocate.c
index a87f3ce25..56d8b499e 100644
--- a/src/srun/allocate.c
+++ b/src/srun/allocate.c
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  * src/srun/allocate.c - srun functions for managing node allocations
- * $Id: allocate.c 14684 2008-08-01 19:57:23Z jette $
+ * $Id: allocate.c 15262 2008-10-01 22:58:26Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -96,7 +96,7 @@ static void _signal_while_allocating(int signo)
 {
 	destroy_job = 1;
 	if (pending_job_id != 0) {
-		slurm_complete_job(pending_job_id, 0);
+		slurm_complete_job(pending_job_id, NO_VAL);
 	}
 }
 
@@ -343,7 +343,10 @@ job_desc_msg_create_from_opts ()
 	j->contiguous     = opt.contiguous;
 	j->features       = opt.constraints;
 	j->immediate      = opt.immediate;
-	j->name           = opt.job_name;
+	if (opt.job_name)
+		j->name   = opt.job_name;
+	else
+		j->name   = opt.cmd_name;
 	j->req_nodes      = xstrdup(opt.nodelist);
 	
 	/* simplify the job allocation nodelist, 
@@ -509,7 +512,7 @@ create_job_step(srun_job_t *job)
 
 	job->ctx_params.node_count = job->nhosts;
 	if (!opt.nprocs_set && (opt.ntasks_per_node != NO_VAL))
-		 opt.nprocs = job->nhosts * opt.ntasks_per_node;
+		job->ntasks = opt.nprocs = job->nhosts * opt.ntasks_per_node;
 	job->ctx_params.task_count = opt.nprocs;
 	
 	job->ctx_params.cpu_count = opt.overcommit ? job->ctx_params.node_count
@@ -547,7 +550,11 @@ create_job_step(srun_job_t *job)
 	job->ctx_params.node_list = opt.nodelist;
 	
 	job->ctx_params.network = opt.network;
-	job->ctx_params.name = opt.job_name;
+	job->ctx_params.no_kill = opt.no_kill;
+	if (opt.job_name_set_cmd && opt.job_name)
+		job->ctx_params.name = opt.job_name;
+	else
+		job->ctx_params.name = opt.cmd_name;
 	
 	debug("requesting job %u, user %u, nodes %u including (%s)", 
 	      job->ctx_params.job_id, job->ctx_params.uid,
@@ -578,7 +585,7 @@ create_job_step(srun_job_t *job)
 		}
 		
 		if (i == 0) {
-			info("Job step creation temporarily disabled, retrying");	
+			info("Job step creation temporarily disabled, retrying");
 			ointf  = xsignal(SIGINT,  _intr_handler);
 			otermf  = xsignal(SIGTERM, _intr_handler);
 			oquitf  = xsignal(SIGQUIT, _intr_handler);
diff --git a/src/srun/opt.c b/src/srun/opt.c
index dd6eddef0..bdeb159cc 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -612,8 +612,10 @@ static void _opt_default()
 
 	opt.relative = NO_VAL;
 	opt.relative_set = false;
+	opt.cmd_name = NULL;
 	opt.job_name = NULL;
-	opt.job_name_set = false;
+	opt.job_name_set_cmd = false;
+	opt.job_name_set_env = false;
 	opt.jobid    = NO_VAL;
 	opt.jobid_set = false;
 	opt.dependency = NULL;
@@ -737,7 +739,8 @@ env_vars_t env_vars[] = {
 {"SLURM_DISTRIBUTION",  OPT_DISTRIB,    NULL,               NULL             },
 {"SLURM_GEOMETRY",      OPT_GEOMETRY,   NULL,               NULL             },
 {"SLURM_IMMEDIATE",     OPT_INT,        &opt.immediate,     NULL             },
-{"SLURM_JOB_NAME",      OPT_STRING,     &opt.job_name,      &opt.job_name_set},
+{"SLURM_JOB_NAME",      OPT_STRING,     &opt.job_name,      
+					&opt.job_name_set_env},
 {"SLURM_JOBID",         OPT_INT,        &opt.jobid,         NULL             },
 {"SLURM_KILL_BAD_EXIT", OPT_INT,        &opt.kill_bad_exit, NULL             },
 {"SLURM_LABELIO",       OPT_INT,        &opt.labelio,       NULL             },
@@ -1142,7 +1145,7 @@ static void set_options(const int argc, char **argv)
 			opt.join = true;
 			break;
 		case (int)'J':
-			opt.job_name_set = true;
+			opt.job_name_set_cmd = true;
 			xfree(opt.job_name);
 			opt.job_name = xstrdup(optarg);
 			break;
@@ -1409,7 +1412,7 @@ static void set_options(const int argc, char **argv)
 			opt.epilog = xstrdup(optarg);
 			break;
 		case LONG_OPT_BEGIN:
-			opt.begin = parse_time(optarg);
+			opt.begin = parse_time(optarg, 0);
 			if (opt.begin == 0) {
 				fatal("Invalid time specification %s",
 				      optarg);
@@ -1663,6 +1666,8 @@ static void _opt_args(int argc, char **argv)
 		setenv("SLURM_NETWORK", opt.network, 1);
 	}
 #endif
+	if (opt.dependency)
+		setenvfs("SLURM_JOB_DEPENDENCY=%s", opt.dependency);
 
 	if (opt.nodelist && (!opt.test_only)) {
 #ifdef HAVE_BG
@@ -1755,8 +1760,8 @@ static bool _opt_verify(void)
 	if (opt.job_min_cpus < opt.cpus_per_task)
 		opt.job_min_cpus = opt.cpus_per_task;
 
-	if ((opt.job_name == NULL) && (opt.argc > 0))
-		opt.job_name = base_name(opt.argv[0]);
+	if (opt.argc > 0)
+		opt.cmd_name = base_name(opt.argv[0]);
 
 	if(!opt.nodelist) {
 		if((opt.nodelist = xstrdup(getenv("SLURM_HOSTFILE")))) {
diff --git a/src/srun/opt.h b/src/srun/opt.h
index 200042374..46179874d 100644
--- a/src/srun/opt.h
+++ b/src/srun/opt.h
@@ -1,6 +1,6 @@
 /*****************************************************************************\
  *  opt.h - definitions for srun option processing
- *  $Id: opt.h 14469 2008-07-09 18:15:23Z jette $
+ *  $Id: opt.h 15204 2008-09-29 16:27:54Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -119,8 +119,10 @@ typedef struct srun_options {
         uint32_t plane_size;    /* lllp distribution -> plane_size for
 				 * when -m plane=<# of lllp per
 				 * plane> */      
+	char *cmd_name;		/* name of command to execute	*/
 	char *job_name;		/* --job-name=,     -J name	*/
-	bool job_name_set;	/* true if job_name explicitly set */
+	bool job_name_set_cmd;	/* true if job_name set by cmd line option */
+	bool job_name_set_env;	/* true if job_name set by env var */
 	unsigned int jobid;     /* --jobid=jobid                */
 	bool jobid_set;		/* true if jobid explicitly set */
 	char *mpi_type;		/* --mpi=type			*/
diff --git a/src/srun/srun.c b/src/srun/srun.c
index c23266235..4187b9ab2 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -110,7 +110,7 @@
 
 mpi_plugin_client_info_t mpi_job_info[1];
 static struct termios termdefaults;
-int global_rc;
+uint32_t global_rc = 0;
 srun_job_t *job = NULL;
 
 struct {
@@ -250,7 +250,6 @@ int srun(int ac, char **av)
 		if (!job || create_job_step(job) < 0)
 			exit(1);
 	} else {
-		got_alloc = 1;
 		/* Combined job allocation and job step launch */
 #ifdef HAVE_FRONT_END
 		uid_t my_uid = getuid();
@@ -263,10 +262,16 @@ int srun(int ac, char **av)
 	
 		if ( !(resp = allocate_nodes()) ) 
 			exit(1);
+		got_alloc = 1;
 		_print_job_information(resp);
 		_set_cpu_env_var(resp);
 		job = job_create_allocation(resp);
+		
 		opt.exclusive = false;	/* not applicable for this step */
+		if (!opt.job_name_set_cmd && opt.job_name_set_env) {
+			/* use SLURM_JOB_NAME env var */
+			opt.job_name_set_cmd = true;
+		}
 		if (!job || create_job_step(job) < 0) {
 			slurm_complete_job(job->jobid, 1);
 			exit(1);
@@ -435,7 +440,7 @@ cleanup:
 	_task_state_struct_free();
 	log_fini();
 
-	return global_rc;
+	return (int)global_rc;
 }
 
 static int _call_spank_local_user (srun_job_t *job)
@@ -952,10 +957,10 @@ static void
 _task_finish(task_exit_msg_t *msg)
 {
 	bitstr_t *tasks_exited = NULL;
-	char buf[2048], *core_str = "", *msg_str, *node_list = NULL;
+	char buf[65536], *core_str = "", *msg_str, *node_list = NULL;
 	static bool first_done = true;
 	static bool first_error = true;
-	int rc = 0;
+	uint32_t rc = 0;
 	int i;
 
 	verbose("%u tasks finished (rc=%u)",
@@ -977,7 +982,6 @@ _task_finish(task_exit_msg_t *msg)
 		}
 	} else if (WIFSIGNALED(msg->return_code)) {
 		bit_or(task_state.finish_abnormal, tasks_exited);
-		rc = 1;
 		msg_str = strsignal(WTERMSIG(msg->return_code));
 #ifdef WCOREDUMP
 		if (WCOREDUMP(msg->return_code))
@@ -985,9 +989,11 @@ _task_finish(task_exit_msg_t *msg)
 #endif
 		node_list = _taskids_to_nodelist(tasks_exited);
 		if (job->state >= SRUN_JOB_CANCELLED) {
+			rc = NO_VAL;
 			verbose("%s: task %s: %s%s", 
 				node_list, buf, msg_str, core_str);
 		} else {
+			rc = msg->return_code;
 			error("%s: task %s: %s%s", 
 			      node_list, buf, msg_str, core_str);
 		}
@@ -1030,7 +1036,7 @@ static void
 _task_state_struct_print(void)
 {
 	bitstr_t *tmp, *seen, *not_seen;
-	char buf[BUFSIZ];
+	char buf[65536];
 	int len;
 
 	len = bit_size(task_state.finish_abnormal); /* all the same length */
@@ -1042,7 +1048,7 @@ _task_state_struct_print(void)
 	if (bit_set_count(task_state.finish_abnormal) > 0) {
 		bit_copybits(tmp, task_state.finish_abnormal);
 		bit_and(tmp, not_seen);
-		bit_fmt(buf, BUFSIZ, tmp);
+		bit_fmt(buf, sizeof(buf), tmp);
 		info("task %s: exited abnormally", buf);
 		bit_or(seen, tmp);
 		bit_copybits(not_seen, seen);
@@ -1052,7 +1058,7 @@ _task_state_struct_print(void)
 	if (bit_set_count(task_state.finish_normal) > 0) {
 		bit_copybits(tmp, task_state.finish_normal);
 		bit_and(tmp, not_seen);
-		bit_fmt(buf, BUFSIZ, tmp);
+		bit_fmt(buf, sizeof(buf), tmp);
 		info("task %s: exited", buf);
 		bit_or(seen, tmp);
 		bit_copybits(not_seen, seen);
@@ -1062,7 +1068,7 @@ _task_state_struct_print(void)
 	if (bit_set_count(task_state.start_failure) > 0) {
 		bit_copybits(tmp, task_state.start_failure);
 		bit_and(tmp, not_seen);
-		bit_fmt(buf, BUFSIZ, tmp);
+		bit_fmt(buf, sizeof(buf), tmp);
 		info("task %s: failed to start", buf);
 		bit_or(seen, tmp);
 		bit_copybits(not_seen, seen);
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index 9b5e3b9aa..835c330e2 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -832,7 +832,7 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 		break;
 #endif
 	case SORTID_START_TIME:
-		job_msg->begin_time = parse_time((char *)new_text);
+		job_msg->begin_time = parse_time((char *)new_text, 0);
 		type = "start time";
 		break;
 	default:
diff --git a/testsuite/expect/README b/testsuite/expect/README
index 6d2bbda32..15b285078 100644
--- a/testsuite/expect/README
+++ b/testsuite/expect/README
@@ -130,7 +130,7 @@ test1.33   Test of srun application exit code reporting
 test1.34   Test of task signal abort message
 test1.35   Test of batch job with multiple concurrent job steps
 test1.36   Test parallel launch of srun (e.g. "srun srun hostname")
-test1.37   REMOVED
+test1.37   Test of srun --tasks-per-node option.
 test1.38   Test srun handling of SIGINT to get task status or kill the job
            (--quit-on-interrupt option).
 test1.39   Test of linux light-weight core files.
@@ -142,7 +142,7 @@ test1.43   Test of slurm_job_will_run API, (srun --test-only option).
 test1.44   Read srun's stdout slowly and test for lost data.   
 test1.45   REMOVED
 test1.46   Test srun option --kill-on-bad-exit
-test1.47   REMOVED
+test1.47   Test of job dependencies with singleton parameter.
 test1.48   Test of srun mail options (--mail-type and --mail-user options).
 test1.49   Test of srun task-prolog and task-epilog options.
 test1.50   Test of running non-existant job, confirm timely termination.
@@ -517,3 +517,4 @@ test21.16  sacctmgr add and list multiple users
 test21.17  sacctmgr modify user
 test21.18  sacctmgr modify multiple users
 test21.19  sacctmgr add and delete coordinator
+test21.20  sacctmgr add and modify QoS
diff --git a/testsuite/expect/globals b/testsuite/expect/globals
index 0126835ee..a9003d3df 100755
--- a/testsuite/expect/globals
+++ b/testsuite/expect/globals
@@ -1134,3 +1134,54 @@ proc check_acct_associations { } {
 	log_user 1
 	return $rc
 }
+
+################################################################
+# Use sacctmgr to determine the accounting AdminLevel of the user
+# running the test suite.  Returns the level string (e.g.
+# "Administrator") on success, or an empty string on failure.
+################################################################
+proc check_accounting_admin_level { } {
+        global sacctmgr alpha alpha_numeric_under bin_id
+
+        set admin_level ""
+        set user_name ""
+	log_user 0
+
+	spawn $bin_id -u -n
+	expect {
+		-re "($alpha_numeric_under)" {
+			set user_name $expect_out(1,string)
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+
+	if { ![string length $user_name] } {
+	   	send_user "FAILURE: No name returned from id\n"
+		return ""
+	}
+
+     	#
+     	# Use sacctmgr to check admin_level
+     	#
+     	set s_pid [spawn $sacctmgr -n -P list user $user_name format=admin]
+     	expect {
+		-re "($alpha)" {
+	    	      set admin_level $expect_out(1,string)
+		      exp_continue
+ 	       }
+	       timeout {
+		      send_user "FAILURE: sacctmgr list not responding\n"
+		      slow_kill $s_pid
+		      set exit_code 1
+	       }
+	       eof {
+		      wait
+	       }
+        }
+
+	log_user 1
+	return $admin_level
+}
diff --git a/testsuite/expect/test1.24 b/testsuite/expect/test1.24
index cd49bd22a..ee70fa7ef 100755
--- a/testsuite/expect/test1.24
+++ b/testsuite/expect/test1.24
@@ -51,7 +51,7 @@ set host_0      ""
 set timeout $max_job_delay
 set srun_pid [spawn $srun -N1 -l --constraint=invalid,constraint -t1 $bin_printenv SLURMD_NODENAME]
 expect {
-	-re "configuration is not available" {
+	-re "error:.*Invalid feature specification" {
 		send_user "This error is expected, no worries\n"
 		set err_msg 1
 		exp_continue
diff --git a/testsuite/expect/test1.26 b/testsuite/expect/test1.26
index 51744936a..b6168ca78 100755
--- a/testsuite/expect/test1.26
+++ b/testsuite/expect/test1.26
@@ -9,7 +9,8 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # LLNL-CODE-402394.
@@ -171,22 +172,7 @@ for {set inx 0} {$inx < $interations} {incr inx} {
 	set spawn_id $noalloc2
 	expect {
 		-i $noalloc2
-		-re "error: .*try again" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: .*already in shared memory" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: .*exit code 1" {
-			exp_continue
-		}
-		-re "error: elan" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: qsw_prog_init" {
+		-re "qsw_prog_init.*Error configuring interconnect" {
 			send_user "Can't avoid this possible error\n"
 			exp_continue
 		}
@@ -208,22 +194,7 @@ for {set inx 0} {$inx < $interations} {incr inx} {
 	set spawn_id $noalloc1
 	expect {
 		-i $noalloc1
-		-re "error: .*try again" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: .*already in shared memory" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: .*exit code 1" {
-			exp_continue
-		}
-		-re "error: elan" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: qsw_prog_init" {
+		-re "qsw_prog_init.*Error configuring interconnect" {
 			send_user "Can't avoid this possible error\n"
 			exp_continue
 		}
@@ -255,19 +226,7 @@ for {set inx 0} {$inx < $interations} {incr inx} {
 			}
 			exp_continue
 		}
-		-re "error: elan" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: qsw_prog_init" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: .*configuring interconnect" {
-			send_user "Can't avoid this possible error\n"
-			exp_continue
-		}
-		-re "error: update_failed_tasks" {
+		-re "qsw_prog_init.*Error configuring interconnect" {
 			send_user "Can't avoid this possible error\n"
 			exp_continue
 		}
diff --git a/testsuite/expect/test1.37 b/testsuite/expect/test1.37
new file mode 100755
index 000000000..78b29cb72
--- /dev/null
+++ b/testsuite/expect/test1.37
@@ -0,0 +1,73 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test of --tasks-per-node option.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette1@llnl.gov>
+# LLNL-CODE-402394.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.llnl.gov/linux/slurm/>.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "1.37"
+set exit_code    0
+
+print_header $test_id
+
+if { [test_bluegene] } {
+	send_user "\nWARNING: This test is incompatible with bluegene systems\n"
+	exit $exit_code
+}
+
+#
+# Spawn srun on one node with two tasks per node (--tasks-per-node=2 -O), each task running id
+#
+set task_output 0
+set timeout $max_job_delay
+set srun_pid [spawn $srun -N1 --tasks-per-node=2 -O -l $bin_id]
+expect {
+	-re "($number): uid=" {
+		incr task_output
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$task_output != 2} {
+	send_user "\nFAILURE: failed to get output from all tasks\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test1.47 b/testsuite/expect/test1.47
new file mode 100755
index 000000000..cb83a2f29
--- /dev/null
+++ b/testsuite/expect/test1.47
@@ -0,0 +1,104 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test of job dependencies with singleton parameter.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette1@llnl.gov>
+# LLNL-CODE-402394.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.llnl.gov/linux/slurm/>.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "1.47"
+set file_in     "test$test_id.input"
+set exit_code   0
+set job_id1     0
+set job_id2     0
+set job_name	"JOB.$test_id"
+
+print_header $test_id
+
+#
+# Build input script file
+#
+make_bash_script $file_in "$bin_sleep 15"
+
+#
+# Spawn a srun batch job that just sleeps for a while
+#
+set timeout $max_job_delay
+set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --job-name=$job_name -t1 $file_in]
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id1 $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		slow_kill $sbatch_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id1 == 0} {
+	send_user "\nFAILURE: batch submit failure\n"
+	exit 1
+}
+
+#
+# Submit a dependent job
+#
+set matches 0
+set srun_pid [spawn $srun -v --dependency=singleton --job-name=$job_name $scontrol show job $job_id1]
+expect {
+	-re "launching ($number).0" {
+		set job_id2 $expect_out(1,string)
+		exp_continue
+	}
+	-re "JobState=COMPLETED" {
+		set matches 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$matches == 0} {
+	send_user "\nFAILURE: Dependent job not completed\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $file_in
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test15.12 b/testsuite/expect/test15.12
index 5c8b25bf6..2a72cf7ab 100755
--- a/testsuite/expect/test15.12
+++ b/testsuite/expect/test15.12
@@ -51,7 +51,7 @@ set err_msg     0
 set timeout $max_job_delay
 spawn $salloc -N1 -t1 --constraint=invalid,constraint salloc -t1 $bin_bash 
 expect {
-	-re "configuration is not available" {
+	-re "error:.*Invalid feature specification" {
 		send_user "This error is expected, no worries\n"
 		set err_msg 1
 		exp_continue
diff --git a/testsuite/expect/test17.12 b/testsuite/expect/test17.12
index afd788900..9fa863ad9 100755
--- a/testsuite/expect/test17.12
+++ b/testsuite/expect/test17.12
@@ -60,7 +60,7 @@ set err_msg     0
 set timeout $max_job_delay
 spawn $sbatch -N1 --constraint=invalid,constraint -t1 $file_in  
 expect {
-	-re "configuration is not available" {
+	-re "error:.*Invalid feature specification" {
 		send_user "This error is expected, no worries\n"
 		set err_msg 1
 		exp_continue
diff --git a/testsuite/expect/test21.10 b/testsuite/expect/test21.10
index e40d6a52d..ae9cd61f1 100755
--- a/testsuite/expect/test21.10
+++ b/testsuite/expect/test21.10
@@ -43,19 +43,36 @@ set add		add
 set lis		list
 set del		delete
 set mod		modify
+set wa		withassociations
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set mc		MaxCPUSecs
+set gm		GrpCPUMins
+set gc		GrpCPUs
+set gj		GrpJobs
+set gs		GrpSubmitJobs
+set gn		GrpNodes
+set gw		GrpWall
+set mm		MaxCPUMins
+set mc		MaxCPUs
 set mj		MaxJobs
+set ms		MaxSubmitJobs
 set mn		MaxNodes
 set mw		MaxWall
 set clu		cluster
 set tc1		tcluster1
 set fs1		2500
-set mc1		1000000
-set mj1		50
-set mn1		300
+set gm1		1000
+set gc1		20
+set gj1		100
+set gs1		300
+set gn1		100
+set gw1		00:45:00
+set mc1		200
+set mm1		100000
+set mj1		500
+set ms1		400
+set mn1		200
 set mw1		01:00:00
 set acc		account
 set ass		associations
@@ -70,15 +87,142 @@ set access_err  0
 
 print_header $test_id
 
+#
+# Check accounting config and bail if not found.
+#
 if { [test_account_storage] == 0 } {
 	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
 	exit 0
 }
- 
+
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
+#
+# Use sacctmgr to delete the test cluster
+#
+	set nothing 0
+	set matches 0
+
+set sadel_pid [spawn $sacctmgr -i $del $clu $tc1]
+
+	expect {
+		-re "privilege to perform this action" {
+			set access_err 1
+			exp_continue
+		}
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Deleting clusters" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $sadel_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$access_err != 0} {
+		return 1
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: sacctmgr had a problem deleting cluster got $matches\n"
+		incr exit_code 1
+	}
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
+#
+# Use sacctmgr to remove an account
+#
+
+	set matches 0
+	set nothing 1
+	set check "Deleting account"
+
+	set my_pid [eval spawn $sacctmgr -i delete account $nm1]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "$check" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem deleting account.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
 #
 # Use sacctmgr to create a cluster
 #
-set sadd_pid [spawn $sacctmgr -i add $clu $nams=$tc1 $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1]
+set sadd_pid [spawn $sacctmgr -i add $clu $nams=$tc1]
 expect {
 	-re "privilege to preform this action" {
 		set access_err 1
@@ -115,7 +259,9 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to add an account
 #
-set sadel_pid [spawn $sacctmgr -i $add $acc $clu=$tc1 $des="$ds1" $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1 $nams=$nm1 $org="$or1" $qs=$qs1]
+set sadel_pid [spawn $sacctmgr -i $add $acc $clu=$tc1 $des="$ds1" $fs=$fs1   \
+$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1 $org="$or1" $qs=$qs1 $nams=$nm1]
 expect {
 	-re "Adding Account" {
 		incr aamatches
@@ -140,9 +286,9 @@ if {$aamatches != 1} {
 #
 # Use sacctmgr to list the test associations
 #
-set as_list_pid [spawn $sacctmgr list $ass $acc=$nm1 ]
+set as_list_pid [spawn $sacctmgr list $acc $wa $nam=$nm1 format=$nams,$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw]
 expect {
-	-re "$tc1 *$nm1" {
+	-re "$nm1,$fs1,$gm1,$gc1,$gj1,$gs1,$gn1,$gw1,$mm1,$mc1,$mj1,$ms1,$mn1,$mw1" {
 		exp_continue
 	}
 	timeout {
diff --git a/testsuite/expect/test21.11 b/testsuite/expect/test21.11
index 3411f0b80..c23e308aa 100755
--- a/testsuite/expect/test21.11
+++ b/testsuite/expect/test21.11
@@ -47,7 +47,7 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set mc		MaxCPUSecs
+set mc		MaxCPUMins
 set mj		MaxJobs
 set mn		MaxNodes
 set mw		MaxWall
@@ -70,7 +70,7 @@ set or1		"Account Org A1"
 set qs		QosLevel
 set qs1		normal
 set access_err  0
-
+set timeout 60
 print_header $test_id
 
 if { [test_account_storage] == 0 } {
@@ -78,6 +78,14 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
  
+#
+# Verify Administrator privileges
+#
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 #
 # Use sacctmgr to create a cluster
 #
diff --git a/testsuite/expect/test21.12 b/testsuite/expect/test21.12
index a7a363bbe..d66fab3a2 100755
--- a/testsuite/expect/test21.12
+++ b/testsuite/expect/test21.12
@@ -48,7 +48,7 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set mc		MaxCPUSecs
+set mc		MaxCPUMins
 set mj		MaxJobs
 set mn		MaxNodes
 set mw		MaxWall
@@ -71,6 +71,7 @@ set or1		accountorga1
 set qs		QosLevel
 set qs1		normal
 set access_err  0
+set timeout 60
 
 print_header $test_id
 
@@ -79,6 +80,14 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
  
+#
+# Verify Administrator privileges
+#
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 #
 # Use sacctmgr to create a cluster
 #
@@ -190,11 +199,11 @@ if { ![check_acct_associations] } {
 #
 set ac_list_pid [spawn $sacctmgr -n list $acc]
 expect {
-	-re "$nm1 *$ds1 *$or1 *$qs1" {
+	-re "$nm1 *$ds1 *$or1" {
 		incr aclmatches
 		exp_continue
 	}
-	-re "$nm2 *$ds1 *$or1 *$qs1" {
+	-re "$nm2 *$ds1 *$or1" {
 		incr aclmatches
 		exp_continue
 	}
diff --git a/testsuite/expect/test21.13 b/testsuite/expect/test21.13
index c6b907428..f145e52c8 100755
--- a/testsuite/expect/test21.13
+++ b/testsuite/expect/test21.13
@@ -74,6 +74,13 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
 
+#
+# Verify Administrator privileges
+#
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
 
 #
 # Use sacctmgr to create a cluster
@@ -397,7 +404,7 @@ proc _mod_acct { cluster name desc org parent fs maxcpu maxjob maxnodes maxwall
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -550,7 +557,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test account modifications
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list assoc acc=$nm1,$nm2,$nm3 format="Account,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [spawn $sacctmgr -n -p list assoc acc=$nm1,$nm2,$nm3 format="Account,Cluster,Fairshare,MaxCPUM,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -587,7 +594,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test account modifications
 #
 set matches 0
-set my_pid [eval spawn $sacctmgr -n -p list acct withassoc acc=$nm1,$nm2,$nm3 format="Account,Desc,Org,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [eval spawn $sacctmgr -n -p list acct withassoc acc=$nm1,$nm2,$nm3 format="Account,Desc,Org,Cluster,Fairshare,MaxCPUM,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
diff --git a/testsuite/expect/test21.14 b/testsuite/expect/test21.14
index b73c438cc..6c252f128 100755
--- a/testsuite/expect/test21.14
+++ b/testsuite/expect/test21.14
@@ -75,6 +75,11 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
 
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 
 #
 # Use sacctmgr to create a cluster
@@ -398,7 +403,7 @@ proc _mod_acct { cluster name desc org parent fs maxcpu maxjob maxnodes maxwall
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -551,7 +556,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test account modifications
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list assoc acc=$nm1,$nm2,$nm3 format="Account,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [spawn $sacctmgr -n -p list assoc acc=$nm1,$nm2,$nm3 format="Account,Cluster,Fairshare,MaxCPUM,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -587,7 +592,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test account modifications
 #
 set matches 0
-set my_pid [eval spawn $sacctmgr -n -p list acct withassoc acc=$nm1,$nm2,$nm3 format="Account,Desc,Org,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [eval spawn $sacctmgr -n -p list acct withassoc acc=$nm1,$nm2,$nm3 format="Account,Desc,Org,Cluster,Fairshare,MaxCPUM,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -626,7 +631,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test account modifications
 #
 set matches 0
-set my_pid [eval spawn $sacctmgr -n -p list assoc acc=$nm3 format="Account,ParentN,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [eval spawn $sacctmgr -n -p list assoc acc=$nm3 format="Account,ParentN,Cluster,Fairshare,MaxCPUM,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -667,7 +672,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test account modifications
 #
 set matches 0
-set my_pid [eval spawn $sacctmgr -n -p list assoc acc=$nm3,$nm2 format="Account,ParentN,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [eval spawn $sacctmgr -n -p list assoc acc=$nm3,$nm2 format="Account,ParentN,Cluster,Fairshare,MaxCPUM,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -705,7 +710,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test account modifications
 #
 set matches 0
-set my_pid [eval spawn $sacctmgr -n -p list assoc acc=$nm3,$nm2 format="Account,ParentN,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [eval spawn $sacctmgr -n -p list assoc acc=$nm3,$nm2 format="Account,ParentN,Cluster,Fairshare,MaxCPUM,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
diff --git a/testsuite/expect/test21.15 b/testsuite/expect/test21.15
index 529e24a4c..562f5592e 100755
--- a/testsuite/expect/test21.15
+++ b/testsuite/expect/test21.15
@@ -102,6 +102,10 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
 
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
 
 #
 # Use sacctmgr to create a cluster
@@ -425,7 +429,7 @@ proc _mod_acct { cluster name desc org parent fs maxcpu maxjob maxnodes maxwall
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -544,7 +548,7 @@ proc _add_user { account adminlevel cluster defaultaccount fs maxcpu maxjob maxn
 	}
 
 	if { [string length $maxcpu] } {
-		set command "$command maxcpu=$maxcpu"
+		set command "$command maxcpum=$maxcpu"
 	}
 
 	if { [string length $maxjob] } {
@@ -734,7 +738,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test user additions
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list user format="User,Def,Admin,clus,acc,fair,maxc,maxj,maxn,maxw"  names=$us1 withassoc]
+set my_pid [spawn $sacctmgr -n -p list user format="User,Def,Admin,clus,acc,fair,maxcpum,maxj,maxn,maxw"  names=$us1 withassoc]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
diff --git a/testsuite/expect/test21.16 b/testsuite/expect/test21.16
index 5d318f87e..064ce7346 100755
--- a/testsuite/expect/test21.16
+++ b/testsuite/expect/test21.16
@@ -88,7 +88,7 @@ set mn		maxnode
 set mw		maxwall
 set dbu		debug
 set access_err  0
-
+#set user_name   "id -u -n"
 
 print_header $test_id
 
@@ -102,6 +102,10 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
 
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
 
 #
 # Use sacctmgr to create a cluster
@@ -425,7 +429,7 @@ proc _mod_acct { cluster name desc org parent fs maxcpu maxjob maxnodes maxwall
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -544,7 +548,7 @@ proc _add_user { account adminlevel cluster defaultaccount fs maxcpu maxjob maxn
 	}
 
 	if { [string length $maxcpu] } {
-		set command "$command maxcpu=$maxcpu"
+		set command "$command maxcpum=$maxcpu"
 	}
 
 	if { [string length $maxjob] } {
@@ -555,7 +559,7 @@ proc _add_user { account adminlevel cluster defaultaccount fs maxcpu maxjob maxn
 		set command "$command  maxnodes=$maxnodes"
 	}
 
-	if { [string length maxwall$] } {
+	if { [string length $maxwall] } {
 		set command "$command maxwall=$maxwall"
 	}
 
@@ -729,7 +733,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test user additions
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list user format="User,Def,Admin,clus,acc,fair,maxc,maxj,maxn,maxw"  names=$us1,$us2,$us3 withassoc]
+set my_pid [spawn $sacctmgr -n -p list user format="User,Def,Admin,clus,acc,fair,maxcpum,maxj,maxn,maxw"  names=$us1,$us2,$us3 withassoc]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
diff --git a/testsuite/expect/test21.17 b/testsuite/expect/test21.17
index e7376f3de..962416ea5 100755
--- a/testsuite/expect/test21.17
+++ b/testsuite/expect/test21.17
@@ -102,6 +102,11 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
 
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 
 #
 # Use sacctmgr to create a cluster
@@ -711,7 +716,7 @@ proc _mod_user { adminlevel defaultaccount fs maxcpu maxjob maxnodes maxwall wac
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -872,7 +877,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test user modifications
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list assoc users=$us1,$us2,$us3 format="User,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [spawn $sacctmgr -n -p list assoc users=$us1,$us2,$us3 format="User,Cluster,Fairshare,Maxcpum,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
diff --git a/testsuite/expect/test21.18 b/testsuite/expect/test21.18
index 84662a10b..fc52a7a67 100755
--- a/testsuite/expect/test21.18
+++ b/testsuite/expect/test21.18
@@ -102,6 +102,11 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
 
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 
 #
 # Use sacctmgr to create a cluster
@@ -425,7 +430,7 @@ proc _mod_acct { cluster name desc org parent fs maxcpu maxjob maxnodes maxwall
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -711,7 +716,7 @@ proc _mod_user { adminlevel defaultaccount fs maxcpu maxjob maxnodes maxwall wac
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -911,7 +916,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test user modifications
 #
 set matches 0
-set my_pid [spawn $sacctmgr -n -p list assoc users=$us1,$us2,$us3 format="User,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [spawn $sacctmgr -n -p list assoc users=$us1,$us2,$us3 format="User,Cluster,Fairshare,Maxcpum,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -951,7 +956,7 @@ if { $exit_code } {
 # Use sacctmgr to list the test user modifications
 #
 set matches 0
-set my_pid [eval spawn $sacctmgr -n -p list assoc user=$us1,$us2,$us3 format="User,Cluster,Fairshare,MaxC,MaxJ,MaxN,MaxW"]
+set my_pid [eval spawn $sacctmgr -n -p list assoc user=$us1,$us2,$us3 format="User,Cluster,Fairshare,Maxcpum,MaxJ,MaxN,MaxW"]
 expect {
 	-re "There was a problem" {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
diff --git a/testsuite/expect/test21.19 b/testsuite/expect/test21.19
index 4ef4e6773..fba9161c0 100755
--- a/testsuite/expect/test21.19
+++ b/testsuite/expect/test21.19
@@ -102,6 +102,11 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
 
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 
 #
 # Use sacctmgr to create a cluster
@@ -425,7 +430,7 @@ proc _mod_acct { cluster name desc org parent fs maxcpu maxjob maxnodes maxwall
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -716,7 +721,7 @@ proc _mod_user { adminlevel defaultaccount fs maxcpu maxjob maxnodes maxwall wac
 	}
 
 	if { [string length $maxcpu] } {
-		set scommand "$scommand maxc=$maxcpu"
+		set scommand "$scommand maxcpum=$maxcpu"
 		set assoc_stuff 1
 	}
 
@@ -997,7 +1002,7 @@ expect {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
 	    	incr exit_code 1
 	}
-	-re "($us1.$nm1.$qs1.$aln.$nm1.|($us2|$us3).$nm1.$qs1.$aln..)" {
+	-re "($us1.$nm1.$aln.$nm1.|($us2|$us3).$nm1.$aln..)" {
 		incr matches
 		exp_continue
 	}
@@ -1035,7 +1040,7 @@ expect {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
 	    	incr exit_code 1
 	}
-	-re "($us1.$nm1.$qs1.$aln.$nm1.|($us2|$us3).$nm1.$qs1.$aln.$nm1,$nm3)" {
+	-re "($us1.$nm1.$aln.$nm1.|($us2|$us3).$nm1.$aln.$nm1,$nm3)" {
 		incr matches
 		exp_continue
 	}
diff --git a/testsuite/expect/test21.20 b/testsuite/expect/test21.20
new file mode 100755
index 000000000..29586fe0e
--- /dev/null
+++ b/testsuite/expect/test21.20
@@ -0,0 +1,1147 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          sacctmgr QoS modify test
+#          
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# LLNL-CODE-402394.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.llnl.gov/linux/slurm/>.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "21.20"
+set exit_code   0
+set tc1		tcluster1
+set tc2		tcluster2
+set tc3		tcluster3
+set fs1		2500
+set fs2		1700
+set fs3		1
+set mc1		1000000
+set mc2		700000
+set mc3		1
+set mj1		50
+set mj2		70
+set mj3		1
+set mn1		300
+set mn2		700
+set mn3		1
+set mw1		01:00:00
+set mw2		00:07:00
+set mw3		00:01:00
+set clu		cluster
+set cl1		1tmach
+set cl2		2tmach
+set cl3		3tmach
+set acc		account
+set acc		account
+set nams	names
+set nm1		testaccta1
+set nm2		testaccta2
+set nm3		testaccta3
+set des		Description
+set ds1		testaccounta1
+set ds2		testacct
+set org		Organization
+set or1		accountorga1
+set or2		acttrg
+set qs		QosLevel
+set qs1		tnormal
+set qs2		texpedite
+set qs3		tstandby
+set par		parent
+set usr		user
+set us1		tuser1
+set us2		tuser2
+set us3		tuser3
+set al		AdminLevel
+set aln		None
+set ala		Administrator
+set alo		Operator
+set dac		DefaultAccount
+set pts		Partitions
+set fs		fairshare
+set mc		maxcpu
+set mj		maxjob
+set mn		maxnode
+set mw		maxwall
+set dbu		debug
+set was		withassoc
+set access_err  0
+
+#set user_name   "id -u -n"
+
+print_header $test_id
+
+set timeout 60
+
+#
+# Check accounting config and bail if not found.
+#
+if { [test_account_storage] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
+	exit 0
+}
+
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
+#
+# Use sacctmgr to create a cluster
+#	
+proc _add_cluster {name} {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to add\n"
+		return 1
+	}
+
+	set my_pid [spawn $sacctmgr -i add cluster $name]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Adding Cluster" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem adding clusters
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	
+	return $exit_code
+}
+
+#
+# Use sacctmgr to remove the test cluster
+#
+proc _remove_cluster {name} {
+        global access_err sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+	set nothing 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to remove\n"
+		return 1
+	}
+
+	set my_pid [spawn $sacctmgr -i delete cluster $name]
+	expect {
+		-re "privilege to perform this action" {
+			set access_err 1
+			exp_continue
+		}
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Deleting clusters" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$access_err != 0} {
+		return 1
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: sacctmgr had a problem deleting cluster got $matches\n"
+		incr exit_code 1
+	}
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
+	return $exit_code
+}
+
+#
+# Use sacctmgr to create a QoS
+#	
+proc _add_qos {name} {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: Need name of qos to add\n"
+		return 1
+	}
+
+	set my_pid [spawn $sacctmgr -i add qos $name]
+	expect {
+		-re "(There was a problem|Unknown condition|Unknown field|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting qos's from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Adding QOS" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem adding QoS got $matches\n"
+		incr exit_code 1
+	}
+
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	
+	return $exit_code
+}
+
+#
+# Use sacctmgr to remove the test QoS
+#
+proc _remove_qos {name} {
+        global access_err sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+	set nothing 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to remove\n"
+		return 1
+	}
+
+	set my_pid [spawn $sacctmgr -i delete qos $name]
+	expect {
+		-re "privilege to perform this action" {
+			set access_err 1
+			exp_continue
+		}
+		-re "(There was a problem|Unknown condition|Unknown field|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Deleting QOS" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$access_err != 0} {
+		return 1
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: sacctmgr had a problem deleting QoS got $matches\n"
+		incr exit_code 1
+	}
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
+	return $exit_code
+}
+
+#
+# Use sacctmgr to add an account
+#
+proc _add_acct { cluster name } {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to add\n"
+		return 1
+	}
+
+	set command "$name"
+
+	if { [string length $cluster] } {
+		set command "$command cluster=$cluster"
+	}
+
+	set my_pid [eval spawn $sacctmgr -i add account $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Adding Account" {
+			incr matches
+			exp_continue
+		}
+		-re "Associations" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 2} {
+		send_user "\nFAILURE:  sacctmgr had a problem adding account.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	return $exit_code
+}
+
+#
+# Use sacctmgr to remove an account
+#
+proc _remove_acct { cluster name } {
+        global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+	set nothing 1
+	set check "Deleting account"
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to remove\n"
+		return 1
+	}
+
+	set command "$name"
+
+	if { [string length $cluster] } {
+		set command "$command cluster=$cluster"
+		set check "Deleting account associations"
+	}
+
+	set my_pid [eval spawn $sacctmgr -i delete account $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "$check" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem deleting account.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
+	return $exit_code
+}
+
+#
+# Use sacctmgr to modify an account
+#
+proc _mod_acct { cluster name desc org parent fs maxcpu maxjob maxnodes maxwall wdesc worg} {
+	global sacctmgr timeout
+	
+	set exit_code 0
+	set matches 0
+	set expected 0
+	set acct_stuff 0
+	set assoc_stuff 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to modify\n"
+		return 1
+	}
+
+	#set up the where
+	set wcommand "where $name"
+
+	if { [string length $cluster] } {
+		set wcommand "$wcommand cluster=$cluster"
+	}
+
+	if { [string length $wdesc] } {
+		set wcommand "$wcommand description='$wdesc'"
+	}
+
+	if { [string length $worg] } {
+		set wcommand "$wcommand organization='$worg'"
+	}
+
+	#set up the set
+	set scommand "set"
+	if { [string length $parent] } {
+		set scommand "$scommand parent=$parent"
+		set assoc_stuff 1
+	}
+
+	if { [string length $fs] } {
+		set scommand "$scommand fairshare=$fs"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxcpu] } {
+		set scommand "$scommand maxc=$maxcpu"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxjob] } {
+		set scommand "$scommand maxj=$maxjob"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxnodes] } {
+		set scommand "$scommand maxn=$maxnodes"
+		set assoc_stuff 1
+	}
+
+	if { [string length $maxwall] } {
+		set scommand "$scommand maxw=$maxwall"
+		set assoc_stuff 1
+	}
+
+	if { [string length $desc] } {
+		set scommand "$scommand description='$desc'"
+		set acct_stuff 1
+	}
+
+	if { [string length $org] } {
+		set scommand "$scommand organization='$org'"
+		set acct_stuff 1
+	}
+
+	incr expected $acct_stuff
+	incr expected $assoc_stuff
+
+	set my_pid [eval spawn $sacctmgr -i modify account $scommand $wcommand ]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknwon problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Modified accounts" {
+			incr matches
+			exp_continue
+		}
+		-re "Modified account associations" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != $expected} {
+		send_user "\nFAILURE:  sacctmgr had a problem modifying account.
+	got $matches needed $expected\n"
+		incr exit_code 1
+	}
+	
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	return $exit_code
+}
+
+
+#
+# Use sacctmgr to add a user
+#
+proc _add_user { account adminlevel cluster defaultaccount qoslevel name } {
+	global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+
+	if { ![string length $name] } {
+		send_user "FAILURE: we need a name to add\n"
+		return 1
+	}
+
+	set command "$name"
+
+	if { [string length $account] } {
+		set command "$command account=$account"
+	}
+
+	if { [string length $adminlevel] } {
+		set command "$command adminlevel=$adminlevel"
+	}
+
+	if { [string length $cluster] } {
+		set command "$command cluster=$cluster"
+	}
+
+	if { [string length $defaultaccount] } {
+		set command "$command defaultaccount=$defaultaccount"
+	}
+
+	if { [string length $qoslevel] } {
+		set command "$command qoslevel=$qoslevel"
+	}
+
+	if { [string length $name] } {
+		set command "$command name=$name"
+	}
+
+	set my_pid [eval spawn $sacctmgr -i add user $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknwon problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Adding User" {
+			incr matches
+			exp_continue
+		}
+		-re "Associations" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr add not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 2} {
+		send_user "\nFAILURE:  sacctmgr had a problem adding user.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	return $exit_code
+}
+
+#
+# Use sacctmgr to remove a user
+#
+proc _remove_user { acct user } {
+	global sacctmgr timeout
+
+	set exit_code 0
+	set matches 0
+	set nothing 0
+	set check "Deleting user"
+
+	if { ![string length $user] } {
+		send_user "FAILURE: we need a name to remove\n"
+		return 1
+	}
+
+	set command "$user"
+
+	if { [string length $acct] } {
+		set command "$command account=$acct"
+		set check "Deleting user associations"
+	}
+
+	set my_pid [eval spawn $sacctmgr -i delete user $command]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "$check" {
+			incr matches
+			exp_continue
+		}
+		-re " Nothing deleted" {
+			incr matches
+			set nothing 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr delete not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE:  sacctmgr had a problem deleting user.
+	got $matches\n"
+		incr exit_code 1
+	}
+
+	if { !$nothing } {
+		if { ![check_acct_associations] } {
+			send_user "\nFAILURE:  Our associations don't line up\n"
+			incr exit_code 1
+		}
+	}
+
+	return $exit_code
+}
+
+#
+# Use sacctmgr to modify a user
+#
+proc _mod_user { qoslevel waccounts wcluster wnames} {
+	global sacctmgr timeout
+	
+	set exit_code 0
+	set matches 0
+	set expected 0
+	set acct_stuff 0
+	set assoc_stuff 0
+
+	if { ![string length $wnames] } {
+		send_user "FAILURE: we need a name to modify\n"
+		return 1
+	}
+
+	#set up the where
+	set wcommand "where"
+
+	if { [string length $wcluster] } {
+		set wcommand "$wcommand cluster=$wcluster"
+	}
+
+	if { [string length $wnames] } {
+		set wcommand "$wcommand names='$wnames'"
+	}
+
+	if { [string length $waccounts] } {
+		set wcommand "$wcommand account='$waccounts'"
+	}
+
+	#set up the set
+	set scommand "set"
+
+	if { [string length $qoslevel] } {
+		set scommand "$scommand qoslevel$qoslevel"
+		set acct_stuff 1
+	}
+
+	incr expected $acct_stuff
+	incr expected $assoc_stuff
+
+	set my_pid [eval spawn $sacctmgr -i modify user $scommand $wcommand ]
+	expect {
+		-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+			send_user "FAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "Problem getting" {
+			send_user "FAILURE: there was a problem getting information from the database\n"
+			incr exit_code 1
+		}
+		-re "Problem adding" {
+			send_user "FAILURE: there was an unknown problem\n"
+			incr exit_code 1
+		}
+		-re "No associations" {
+			send_user "FAILURE: your command didn't return anything\n"
+			incr exit_code 1
+		}
+		-re "Modified account associations" {
+			incr matches
+			exp_continue
+		}
+		-re "Modified users" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr modify not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != $expected} {
+		send_user "\nFAILURE:  sacctmgr had a problem modifying user.
+	got $matches needed $expected\n"
+		incr exit_code 1
+	}
+	
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+	return $exit_code
+}
+
+#make sure we have a clean system and permission to do this work
+_remove_user "" "$us1,$us2,$us3"
+_remove_acct "" "$nm1,$nm2,$nm3"
+_remove_qos "$qs1,$qs2,$qs3"
+_remove_cluster "$tc1,$tc2,$tc3"
+if {$access_err != 0} {
+	send_user "\nWARNING: not authorized to perform this test\n"
+	exit $exit_code
+}
+
+#add cluster
+incr exit_code [_add_cluster "$tc1,$tc2,$tc3"]
+if { $exit_code } {
+	_remove_user "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_qos "$qs1,$qs2,$qs3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#add qos
+incr exit_code [_add_qos "$qs1,$qs2,$qs3"]
+if { $exit_code } {
+	_remove_user "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_qos "$qs1,$qs2,$qs3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#add accounts
+incr exit_code [_add_acct "$tc1,$tc2,$tc3" "$nm1,$nm2,$nm3"]
+if { $exit_code } {
+	_remove_user "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_qos "$qs1,$qs2,$qs3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#add users
+# _add_user argument order:
+# account adminlevel cluster defaultaccount qoslevel name
+incr exit_code [_add_user "$nm1,$nm2,$nm3" "$alo" "$tc1,$tc2,$tc3" "$nm2" "$qs1,$qs2" $us1,$us2,$us3]
+if { $exit_code } {
+	_remove_user "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_qos "$qs1,$qs2,$qs3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#
+# Use sacctmgr to list the test qos additions
+#
+set matches 0
+set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3 $was]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sacctmgr command\n"
+		incr exit_code 1
+	}
+	-re "(($us1|$us2|$us3).($qs1|$qs2))." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr list associations not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 27} {
+	send_user "\nFAILURE:  Account addition 1 incorrect with only $matches.\n"
+	incr exit_code 1
+}
+
+#modify user1 QoS
+# _mod_user argument order:
+# qoslevel waccounts wcluster wnames
+#
+incr exit_code [_mod_user "=$qs3" "$nm2" "$tc1,$tc2,$tc3" $us1]
+if { $exit_code } {
+	_remove_user "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_qos "$qs1,$qs2,$qs3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#
+# Use sacctmgr to list the test qos modifications
+#
+set matches 0
+set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3 $was]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sacctmgr command\n"
+		incr exit_code 1
+	}
+	-re "$us1.$qs3." {
+		incr matches
+		exp_continue
+	}
+	-re "($us2|$us3).$qs2.$qs1." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr list associations not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 21} {
+	send_user "\nFAILURE:  Account addition 1 incorrect with only $matches.\n"
+	incr exit_code 1
+}
+
+#modify user2 QoS
+# _mod_user argument order:
+# qoslevel waccounts wcluster wnames
+#
+incr exit_code [_mod_user "+=$qs3" "$nm2" "$tc1,$tc2,$tc3" $us2]
+if { $exit_code } {
+	_remove_user "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_qos "$qs1,$qs2,$qs3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#
+# Use sacctmgr to list the test qos modifications
+#
+set matches 0
+set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3 $was]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sacctmgr command\n"
+		incr exit_code 1
+	}
+	-re "$us1.$qs3." {
+		incr matches
+		exp_continue
+	}
+	-re "$us2.$qs2.$qs1.$qs3." {
+		incr matches
+		exp_continue
+	}
+	-re "$us3.$qs2.$qs1." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr list associations not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 15} {
+	send_user "\nFAILURE:  Account addition 1 incorrect with only $matches.\n"
+	incr exit_code 1
+}
+
+
+#modify user3 QoS
+# _mod_user argument order:
+# qoslevel waccounts wcluster wnames
+#
+incr exit_code [_mod_user "-=$qs2" "$nm2" "$tc1,$tc2,$tc3" $us3]
+if { $exit_code } {
+	_remove_user "" "$us1,$us2,$us3"
+	_remove_acct "" "$nm1,$nm2,$nm3"
+	_remove_qos "$qs1,$qs2,$qs3"
+	_remove_cluster "$tc1,$tc2,$tc3"
+	exit $exit_code
+}
+
+#
+# Use sacctmgr to list the test qos modifications
+#
+set matches 0
+set my_pid [spawn $sacctmgr -n -p list user format="User,QosLevel" names=$us1,$us2,$us3 $was]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sacctmgr command\n"
+		incr exit_code 1
+	}
+	-re "$us1.$qs3." {
+		incr matches
+		exp_continue
+	}
+	-re "$us2.$qs2.$qs1.$qs3." {
+		incr matches
+		exp_continue
+	}
+	-re "$us3.$qs1." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr list associations not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 9} {
+	send_user "\nFAILURE:  Account addition 1 incorrect with only $matches.\n"
+	incr exit_code 1
+}
+
+# Clean up the test entities added above
+
+incr exit_code [_remove_user "" "$us1,$us2,$us3"]
+incr exit_code [_remove_acct "" "$nm1,$nm2,$nm3"]
+incr exit_code [_remove_qos "$qs1,$qs2,$qs3"]
+incr exit_code [_remove_cluster "$tc1,$tc2,$tc3"]
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+} else {
+	send_user "\nFAILURE\n"
+}
+exit $exit_code
+
diff --git a/testsuite/expect/test21.5 b/testsuite/expect/test21.5
index 4110ad118..ce86a2716 100755
--- a/testsuite/expect/test21.5
+++ b/testsuite/expect/test21.5
@@ -44,15 +44,31 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set mc		MaxCPUSecs
+set gm		GrpCPUMins
+set gc		GrpCPUs
+set gj		GrpJobs
+set gs		GrpSubmitJobs
+set gn		GrpNodes
+set gw		GrpWall
+set mm		MaxCPUMins
+set mc		MaxCPUs
 set mj		MaxJobs
+set ms		MaxSubmitJobs
 set mn		MaxNodes
 set mw		MaxWall
 set clu		cluster
 set tc1		tcluster1
 set fs1		2500
-set mc1		1000000
-set mj1		50
+set gm1		1000000
+set gc1		50
+set gj1		100
+set gs1		500
+set gn1		300
+set gw1		00:45:00
+set mc1		100
+set mm1		100000
+set mj1		500
+set ms1		500
 set mn1		300
 set mw1		01:00:00
 set access_err  0
@@ -67,6 +83,11 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
 
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 #
 # Use sacctmgr to delete the test cluster
 #
@@ -131,7 +152,9 @@ set sadel_pid [spawn $sacctmgr -i $del $clu $tc1]
 #
 # Use sacctmgr to create a cluster
 #
-set sadd_pid [spawn $sacctmgr -i add $clu $nams=$tc1 $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1]
+set sadd_pid [spawn $sacctmgr -i add $clu $nams=$tc1 $fs=$fs1 $gm=$gm1 \
+$gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 expect {
 	-re "privilege to preform this action" {
 		set access_err 1
@@ -145,7 +168,7 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "User Defaults" {
+	-re "Default Limits" {
 		incr amatches
 		exp_continue
 	}
@@ -153,6 +176,34 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$gm *= $gm1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gc *= $gc1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gj *= $gj1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gn *= $gn1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gs *= $gs1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gw *= $gw1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$mm *= $mm1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mc *= $mc1" {
 		incr amatches
 		exp_continue
@@ -165,6 +216,10 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$ms *= $ms1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mw *= $mw1" {
 		incr amatches
 		exp_continue
@@ -182,7 +237,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 8} {
+if {$amatches != 16} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters got $amatches\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test21.6 b/testsuite/expect/test21.6
index 3bfb62ac5..5a2e4a040 100755
--- a/testsuite/expect/test21.6
+++ b/testsuite/expect/test21.6
@@ -44,8 +44,16 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set mc		MaxCPUSecs
+set gm		GrpCPUMins
+set gc		GrpCPUs
+set gj		GrpJobs
+set gs		GrpSubmitJobs
+set gn		GrpNodes
+set gw		GrpWall
+set mm		MaxCPUMins
+set mc		MaxCPUs
 set mj		MaxJobs
+set ms		MaxSubmitJobs
 set mn		MaxNodes
 set mw		MaxWall
 set clu		cluster
@@ -53,23 +61,104 @@ set tc1		tcluster1
 set tc2		tcluster2
 set tc3		tcluster3
 set fs1		2500
-set mc1		1000000
-set mj1		50
+set gm1		1000000
+set gc1		50
+set gj1		100
+set gs1		500
+set gn1		300
+set gw1		00:45:00
+set mc1		100
+set mm1		100000
+set mj1		500
+set ms1		500
 set mn1		300
 set mw1		01:00:00
+
 set access_err  0
 
 print_header $test_id
 
+#
+# Check accounting config and bail if not found.
+#
 if { [test_account_storage] == 0 } {
 	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
 	exit 0
 }
- 
+
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
+#
+# Use sacctmgr to delete the test cluster
+#
+set nothing 0
+set matches 0
+
+set sadel_pid [spawn $sacctmgr -i $del $clu $tc1,$tc2,$tc3]
+
+expect {
+	-re "privilege to perform this action" {
+		set access_err 1
+		exp_continue
+	}
+	-re "(There was a problem|Unknown condition|Bad format on|Bad MaxWall|Unknown option)" {
+		send_user "FAILURE: there was a problem with the sacctmgr command\n"
+		incr exit_code 1
+	}
+	-re "Problem getting" {
+		send_user "FAILURE: there was a problem getting information from the database\n"
+		incr exit_code 1
+	}
+	-re "Problem adding" {
+		send_user "FAILURE: there was an unknown problem\n"
+		incr exit_code 1
+	}
+	-re "No associations" {
+		send_user "FAILURE: your command didn't return anything\n"
+		incr exit_code 1
+	}
+	-re "Deleting clusters" {
+		incr matches
+		exp_continue
+	}
+	-re " Nothing deleted" {
+		incr matches
+		set nothing 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr delete not responding\n"
+		slow_kill $sadel_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$access_err != 0} {
+	exit $exit_code
+}
+if {$matches != 1} {
+	send_user "\nFAILURE: sacctmgr had a problem deleting cluster got $matches\n"
+	incr exit_code 1
+}
+if { !$nothing } {
+	if { ![check_acct_associations] } {
+		send_user "\nFAILURE:  Our associations don't line up\n"
+		incr exit_code 1
+	}
+}
+
 #
 # Use sacctmgr to create a cluster
 #
-set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1]
+set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1 \
+$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
+
 expect {
 	-re "privilege to preform this action" {
 		set access_err 1
@@ -91,7 +180,7 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "User Defaults" {
+	-re "Default Limits" {
 		incr amatches
 		exp_continue
 	}
@@ -99,6 +188,34 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$gm *= $gm1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gc *= $gc1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gj *= $gj1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gn *= $gn1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gs *= $gs1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gw *= $gw1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$mm *= $mm1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mc *= $mc1" {
 		incr amatches
 		exp_continue
@@ -111,6 +228,10 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$ms *= $ms1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mw *= $mw1" {
 		incr amatches
 		exp_continue
@@ -137,8 +258,8 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 12} {
-	send_user "\nFAILURE:  sacctmgr had a problem adding clusters\n"
+if {$amatches != 20} {
+	send_user "\nFAILURE:  sacctmgr had a problem adding clusters got $amatches\n"
 	set exit_code 1
 }
 if { ![check_acct_associations] } {
diff --git a/testsuite/expect/test21.7 b/testsuite/expect/test21.7
index 0f464fdd1..9e61b632d 100755
--- a/testsuite/expect/test21.7
+++ b/testsuite/expect/test21.7
@@ -44,8 +44,16 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set mc		MaxCPUSecs
+set gm		GrpCPUMins
+set gc		GrpCPUs
+set gj		GrpJobs
+set gs		GrpSubmitJobs
+set gn		GrpNodes
+set gw		GrpWall
+set mm		MaxCPUMins
+set mc		MaxCPUs
 set mj		MaxJobs
+set ms		MaxSubmitJobs
 set mn		MaxNodes
 set mw		MaxWall
 set clu		cluster
@@ -53,10 +61,19 @@ set tc1		tcluster1
 set tc2		tcluster2
 set tc3		tcluster3
 set fs1		2500
-set mc1		1000000
-set mj1		50
+set gm1		1000000
+set gc1		50
+set gj1		100
+set gs1		500
+set gn1		300
+set gw1		00:45:00
+set mc1		100
+set mm1		100000
+set mj1		500
+set ms1		500
 set mn1		300
 set mw1		01:00:00
+
 set access_err  0
 
 print_header $test_id
@@ -65,11 +82,21 @@ if { [test_account_storage] == 0 } {
 	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
 	exit 0
 }
- 
+
+#
+# Verify administrator privileges
+#
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 #
 # Use sacctmgr to create a cluster
 #
-set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1]
+set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1  \
+$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 expect {
 	-re "privilege to preform this action" {
 		set access_err 1
@@ -91,7 +118,7 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "User Defaults" {
+	-re "Default Limits" {
 		incr amatches
 		exp_continue
 	}
@@ -99,6 +126,34 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$gm *= $gm1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gc *= $gc1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gj *= $gj1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gn *= $gn1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gs *= $gs1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gw *= $gw1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$mm *= $mm1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mc *= $mc1" {
 		incr amatches
 		exp_continue
@@ -111,6 +166,10 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$ms *= $ms1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mw *= $mw1" {
 		incr amatches
 		exp_continue
@@ -137,7 +196,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 12} {
+if {$amatches != 20} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters\n"
 	set exit_code 1
 }
@@ -149,7 +208,7 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to list the addition of cluster
 #
-set slist_pid [spawn $sacctmgr $lis $clu]
+set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw ]
 expect {
 	-re "Cluster" {
 		incr lmatches
@@ -159,7 +218,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$mc1 *$mj1 *$mn1 *$mw1" {
+	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
@@ -167,7 +226,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$mc1 *$mj1 *$mn1 *$mw1" {
+	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
@@ -175,7 +234,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$mc1 *$mj1 *$mn1 *$mw1" {
+	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
diff --git a/testsuite/expect/test21.8 b/testsuite/expect/test21.8
index 6d414415c..05410591e 100755
--- a/testsuite/expect/test21.8
+++ b/testsuite/expect/test21.8
@@ -45,8 +45,16 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set mc		MaxCPUSecs
+set gm		GrpCPUMins
+set gc		GrpCPUs
+set gj		GrpJobs
+set gs		GrpSubmitJobs
+set gn		GrpNodes
+set gw		GrpWall
+set mm		MaxCPUMins
+set mc		MaxCPUs
 set mj		MaxJobs
+set ms		MaxSubmitJobs
 set mn		MaxNodes
 set mw		MaxWall
 set clu		cluster
@@ -55,14 +63,31 @@ set tc2		tcluster2
 set tc3		tcluster3
 set fs1		2500
 set fs2		1375
-set mc1		1000000
-set mc2		200000
-set mj1		50
-set mj2		9
-set mn1		300
-set mn2		125
+set gm1		1000
+set gc1		20
+set gj1		100
+set gs1		300
+set gn1		100
+set gw1		00:45:00
+set mc1		200
+set mm1		100000
+set mj1		500
+set ms1		400
+set mn1		200
 set mw1		01:00:00
+set gm2		2000
+set gc2		50
+set gj2		200
+set gs2		400
+set gn2		150
+set gw2		00:45:00
+set mc2		100
+set mm2		20000
+set mj2		600
+set ms2		700
+set mn2		300
 set mw2		12:00:00
+
 set access_err  0
 
 print_header $test_id
@@ -72,10 +97,20 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
  
+#
+# Verify administrator privileges
+#
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 #
 # Use sacctmgr to create a cluster
 #
-set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1]
+set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1  \
+$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 expect {
 	-re "privilege to preform this action" {
 		set access_err 1
@@ -97,7 +132,7 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "User Defaults" {
+	-re "Default Limits" {
 		incr amatches
 		exp_continue
 	}
@@ -105,6 +140,34 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$gm *= $gm1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gc *= $gc1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gj *= $gj1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gn *= $gn1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gs *= $gs1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gw *= $gw1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$mm *= $mm1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mc *= $mc1" {
 		incr amatches
 		exp_continue
@@ -117,6 +180,10 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$ms *= $ms1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mw *= $mw1" {
 		incr amatches
 		exp_continue
@@ -143,7 +210,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 12} {
+if {$amatches != 20} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters\n"
 	set exit_code 1
 }
@@ -155,13 +222,15 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to modify one cluster
 #
-set smod_pid [spawn $sacctmgr $mod $clu set $fs=$fs2 $mc=$mc2 $mj=$mj2 $mn=$mn2 $mw=$mw2 where $nams=$tc2]
+set smod_pid [spawn $sacctmgr $mod $clu set $fs=$fs2   \
+$gm=$gm2 $gc=$gc2 $gj=$gj2 $gn=$gn2 $gs=$gs2 $gw=$gw2 $mc=$mc2 $mm=$mm2 \
+$mj=$mj2 $ms=$ms2 $mn=$mn2 $mw=$mw2 where $nams=$tc2]
 expect {
 	-re "Setting" {
 		incr mmatches
 		exp_continue
 	}
-	-re "User Defaults" {
+	-re "Default Limits" {
 		incr mmatches
 		exp_continue
 	}
@@ -169,6 +238,34 @@ expect {
 		incr mmatches
 		exp_continue
 	}
+	-re "$gm *= $gm2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gc *= $gc2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gj *= $gj2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gn *= $gn2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gs *= $gs2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gw *= $gw2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$mm *= $mm2" {
+		incr mmatches
+		exp_continue
+	}
 	-re "$mc *= $mc2" {
 		incr mmatches
 		exp_continue
@@ -181,6 +278,10 @@ expect {
 		incr mmatches
 		exp_continue
 	}
+	-re "$ms *= $ms2" {
+		incr mmatches
+		exp_continue
+	}
 	-re "$mw *= $mw2" {
 		incr mmatches
 		exp_continue
@@ -204,7 +305,7 @@ expect {
 	}
 }
 
-if {$mmatches != 9} {
+if {$mmatches != 17} {
 	send_user "\nFAILURE:  sacctmgr had a problem modifying clusters\n"
 	set exit_code 1
 }
@@ -216,7 +317,7 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to list the addition of cluster
 #
-set slist_pid [spawn $sacctmgr $lis $clu]
+set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw]
 expect {
 	-re "Cluster" {
 		incr lmatches
@@ -226,7 +327,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$mc1 *$mj1 *$mn1 *$mw1" {
+	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
@@ -234,7 +335,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs2 *$mc2 *$mj2 *$mn2 *$mw2" {
+	-re "$fs2 *$gm2 *$gc2 *$gj2 *$gs2 *$gn2 *$gw2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
 		incr lmatches
 		exp_continue
 	}
@@ -242,7 +343,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$mc1 *$mj1 *$mn1 *$mw1" {
+	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
diff --git a/testsuite/expect/test21.9 b/testsuite/expect/test21.9
index 4ec519450..372ba5c44 100755
--- a/testsuite/expect/test21.9
+++ b/testsuite/expect/test21.9
@@ -45,8 +45,16 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set mc		MaxCPUSecs
+set gm		GrpCPUMins
+set gc		GrpCPUs
+set gj		GrpJobs
+set gs		GrpSubmitJobs
+set gn		GrpNodes
+set gw		GrpWall
+set mm		MaxCPUMins
+set mc		MaxCPUs
 set mj		MaxJobs
+set ms		MaxSubmitJobs
 set mn		MaxNodes
 set mw		MaxWall
 set clu		cluster
@@ -55,14 +63,31 @@ set tc2		tcluster2
 set tc3		tcluster3
 set fs1		2500
 set fs2		1375
-set mc1		1000000
-set mc2		200000
-set mj1		50
-set mj2		9
-set mn1		300
-set mn2		125
+set gm1		1000
+set gc1		20
+set gj1		100
+set gs1		300
+set gn1		100
+set gw1		00:45:00
+set mc1		200
+set mm1		100000
+set mj1		500
+set ms1		400
+set mn1		200
 set mw1		01:00:00
+set gm2		2000
+set gc2		50
+set gj2		200
+set gs2		400
+set gn2		150
+set gw2		00:45:00
+set mc2		100
+set mm2		20000
+set mj2		600
+set ms2		700
+set mn2		300
 set mw2		12:00:00
+
 set access_err  0
 
 print_header $test_id
@@ -72,10 +97,20 @@ if { [test_account_storage] == 0 } {
 	exit 0
 }
  
+#
+# Verify administrator privileges
+#
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
 #
 # Use sacctmgr to create a cluster
 #
-set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1 $mc=$mc1 $mj=$mj1 $mn=$mn1 $mw=$mw1]
+set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1   \
+$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 expect {
 	-re "privilege to preform this action" {
 		set access_err 1
@@ -97,7 +132,7 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "User Defaults" {
+	-re "Default Limits" {
 		incr amatches
 		exp_continue
 	}
@@ -105,6 +140,34 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$gm *= $gm1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gc *= $gc1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gj *= $gj1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gn *= $gn1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gs *= $gs1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$gw *= $gw1" {
+		incr amatches
+		exp_continue
+	}
+	-re "$mm *= $mm1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mc *= $mc1" {
 		incr amatches
 		exp_continue
@@ -117,6 +180,10 @@ expect {
 		incr amatches
 		exp_continue
 	}
+	-re "$ms *= $ms1" {
+		incr amatches
+		exp_continue
+	}
 	-re "$mw *= $mw1" {
 		incr amatches
 		exp_continue
@@ -143,7 +210,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 12} {
+if {$amatches != 20} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters\n"
 	set exit_code 1
 }
@@ -155,13 +222,15 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to modify one cluster
 #
-set smod_pid [spawn $sacctmgr $mod $clu set $fs=$fs2 $mc=$mc2 $mj=$mj2 $mn=$mn2 $mw=$mw2 where $nams=$tc1,$tc2,$tc3]
+set smod_pid [spawn $sacctmgr $mod $clu set $fs=$fs2    \
+$gm=$gm2 $gc=$gc2 $gj=$gj2 $gn=$gn2 $gs=$gs2 $gw=$gw2 $mc=$mc2 $mm=$mm2 \
+$mj=$mj2 $ms=$ms2 $mn=$mn2 $mw=$mw2 where $nams=$tc1,$tc2,$tc3]
 expect {
 	-re "Setting" {
 		incr mmatches
 		exp_continue
 	}
-	-re "User Defaults" {
+	-re "Default Limits" {
 		incr mmatches
 		exp_continue
 	}
@@ -169,6 +238,34 @@ expect {
 		incr mmatches
 		exp_continue
 	}
+	-re "$gm *= $gm2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gc *= $gc2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gj *= $gj2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gn *= $gn2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gs *= $gs2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$gw *= $gw2" {
+		incr mmatches
+		exp_continue
+	}
+	-re "$mm *= $mm2" {
+		incr mmatches
+		exp_continue
+	}
 	-re "$mc *= $mc2" {
 		incr mmatches
 		exp_continue
@@ -181,6 +278,10 @@ expect {
 		incr mmatches
 		exp_continue
 	}
+	-re "$ms *= $ms2" {
+		incr mmatches
+		exp_continue
+	}
 	-re "$mw *= $mw2" {
 		incr mmatches
 		exp_continue
@@ -204,7 +305,7 @@ expect {
 	}
 }
 
-if {$mmatches != 9} {
+if {$mmatches != 17} {
 	send_user "\nFAILURE:  sacctmgr had a problem modifying clusters\n"
 	set exit_code 1
 }
@@ -216,7 +317,7 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to list the addition of cluster
 #
-set slist_pid [spawn $sacctmgr $lis $clu]
+set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw]
 expect {
 	-re "Cluster" {
 		incr lmatches
@@ -226,7 +327,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs2 *$mc2 *$mj2 *$mn2 *$mw2" {
+	-re "$fs2 *$gm2 *$gc2 *$gj2 *$gs2 *$gn2 *$gw2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
 		incr lmatches
 		exp_continue
 	}
@@ -234,7 +335,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs2 *$mc2 *$mj2 *$mn2 *$mw2" {
+	-re "$fs2 *$gm2 *$gc2 *$gj2 *$gs2 *$gn2 *$gw2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
 		incr lmatches
 		exp_continue
 	}
@@ -242,7 +343,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs2 *$mc2 *$mj2 *$mn2 *$mw2" {
+	-re "$fs2 *$gm2 *$gc2 *$gj2 *$gs2 *$gn2 *$gw2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
 		incr lmatches
 		exp_continue
 	}
diff --git a/testsuite/expect/test7.11 b/testsuite/expect/test7.11
index dd278611b..2ab18f0e8 100755
--- a/testsuite/expect/test7.11
+++ b/testsuite/expect/test7.11
@@ -224,6 +224,10 @@ if {[wait_for_file $spank_out] == 0} {
 			incr matches
 			exp_continue
 		}
+		-re "spank_get_item: argv" {
+			incr matches
+			exp_continue
+		}
 		-re "slurm_spank_exit: opt_arg=5" {
 			incr matches
 			exp_continue
@@ -232,7 +236,7 @@ if {[wait_for_file $spank_out] == 0} {
 			wait
 		}
 	}
-	if {$matches != 3} {
+	if {$matches != 4} {
 		send_user "\nFAILURE: remote (slurmd) spank plugin failure\n"
 		set exit_code 1
 	} else {
diff --git a/testsuite/expect/test7.11.prog.c b/testsuite/expect/test7.11.prog.c
index 954e8a1bf..9d49ce56b 100644
--- a/testsuite/expect/test7.11.prog.c
+++ b/testsuite/expect/test7.11.prog.c
@@ -96,6 +96,8 @@ slurm_spank_local_user_init(spank_t sp, int ac, char **av)
 int slurm_spank_task_init(spank_t sp, int ac, char **av)
 {
 	uid_t my_uid;
+	int argc, i;
+	char **argv;
 
 	if (opt_out_file && opt_arg) {
 		FILE *fp = fopen(opt_out_file, "a");
@@ -104,6 +106,13 @@ int slurm_spank_task_init(spank_t sp, int ac, char **av)
 		fprintf(fp, "slurm_spank_task_init: opt_arg=%d\n", opt_arg);
 		if (spank_get_item(sp, S_JOB_UID, &my_uid) == ESPANK_SUCCESS)
 			fprintf(fp, "spank_get_item: my_uid=%d\n", my_uid);
+                if (spank_get_item(sp, S_JOB_ARGV, &argc, &argv) == 
+		    ESPANK_SUCCESS) {
+			for (i=0; i<argc; i++) {
+				fprintf(fp, "spank_get_item: argv[%d]=%s\n", 
+					i, argv[i]);
+			}
+		}
 		fclose(fp);
 	}
 	return (0);
diff --git a/testsuite/expect/test7.7 b/testsuite/expect/test7.7
index c55a44bc1..7a90ba1cf 100755
--- a/testsuite/expect/test7.7
+++ b/testsuite/expect/test7.7
@@ -150,7 +150,7 @@ make_bash_script $file_in "
   echo BEGIN
   $bin_sleep 20
   echo FINI
-  exit 123"
+  exit 0"
 set job_id1 0
 set job_id2 0
 set sbatch_pid [spawn $sbatch -N1-1024 --output=$file_out --comment=test -t1 $file_in]
diff --git a/testsuite/expect/test7.7.prog.c b/testsuite/expect/test7.7.prog.c
index cc8a53685..81ba2a5e3 100644
--- a/testsuite/expect/test7.7.prog.c
+++ b/testsuite/expect/test7.7.prog.c
@@ -25,6 +25,7 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#include <errno.h>
 #include <netdb.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -71,7 +72,7 @@ static int _conn_wiki_port(char *host, int port)
 
 static int _conn_event_port(char *host, int port)
 {
-	int sock_fd;
+	int i, rc, sock_fd;
 	struct sockaddr_in wiki_addr;
 	struct hostent *hptr;
 
@@ -88,11 +89,19 @@ static int _conn_event_port(char *host, int port)
 	wiki_addr.sin_family = AF_INET;
 	wiki_addr.sin_port   = htons(port);
 	memcpy(&wiki_addr.sin_addr.s_addr, hptr->h_addr, hptr->h_length);
-	if (bind(sock_fd, (struct sockaddr *) &wiki_addr,
-			sizeof(wiki_addr))) {
-		printf("WARNING: bind to port %i failed, may not be real error\n",
-			port);
-		return -1;
+	for (i=0; ; i++) {
+		if (i)
+			sleep(5);
+		rc = bind(sock_fd, (struct sockaddr *) &wiki_addr,
+			  sizeof(wiki_addr));
+		if (rc == 0)
+			break;
+		if ((errno != EINVAL) || (i > 5)) {
+			printf("WARNING: bind to port %i; %s\n", 
+			       port, strerror(errno));
+			return -1;
+		}
+		printf("WARNING: port %i in use, retrying\n", port);
 	}
 	listen(sock_fd, 1);
 	return sock_fd;
@@ -179,7 +188,7 @@ static char *_recv_msg(int fd)
 	return buf;
 }	
 
-static void _xmit(char *msg)
+static int _xmit(char *msg)
 {
 	int msg_len = strlen(msg);
 	char *out_msg, *in_msg, sum[20], *sc_ptr;
@@ -199,12 +208,11 @@ static void _xmit(char *msg)
 	printf("recv:%s\n\n", in_msg);
 	sc_ptr = strstr(in_msg, "SC=");
 	sc = atoi(sc_ptr+3);
-	if (sc != 0) {
+	if (sc != 0)
 		fprintf(stderr, "RPC failure\n");
-		exit(1);
-	} 
 	free(in_msg);
 	close(wiki_fd);
+	return sc;
 }
 
 static void _event_mgr(void)
@@ -240,19 +248,22 @@ static void _get_jobs(void)
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=%s",
 		(uint32_t) now, "CMD=GETJOBS ARG=0:ALL");
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 
 	/* Dump volitile data */
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=GETJOBS ARG=%u:ALL",
 		(uint32_t) now, (uint32_t) 1);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 
 	/* Dump state only */
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=GETJOBS ARG=%u:ALL",
 		(uint32_t) now, (uint32_t) (now+2));
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _get_nodes(void)
@@ -264,19 +275,22 @@ static void _get_nodes(void)
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=%s", 
 		(uint32_t) now, "CMD=GETNODES ARG=0:ALL");
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 
 	/* Dump volitile data */
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=GETNODES ARG=%u:ALL",
 		(uint32_t) now, (uint32_t) 1);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 
 	/* Dump state only */
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=GETNODES ARG=%u:ALL",
 		(uint32_t) now, (uint32_t) (now+2));
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _cancel_job(long my_job_id)
@@ -289,20 +303,32 @@ static void _cancel_job(long my_job_id)
 		"TYPE=ADMIN "
 		"COMMENT=\"cancel comment\" ",
 		(uint32_t) now, my_job_id);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _start_job(long my_job_id)
 {
 	time_t now = time(NULL);
 	char out_msg[128];
+	int i, rc;
 
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=STARTJOB ARG=%ld "
 		"COMMENT=\'start comment\' "
 		"TASKLIST=",	/* Empty TASKLIST means we don't care */
 		(uint32_t) now, my_job_id);
-	_xmit(out_msg);
+
+	for (i=0; i<10; i++) {
+		if (i)
+			sleep(10);
+		rc = _xmit(out_msg);
+		if (rc == 0)
+			break;
+		/* Still completing after requeue */
+	}
+	if (rc != 0)
+		exit(1);
 }
 
 static void _suspend_job(long my_job_id)
@@ -313,7 +339,8 @@ static void _suspend_job(long my_job_id)
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=SUSPENDJOB ARG=%ld",
 		(uint32_t) now, my_job_id);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _signal_job(long my_job_id)
@@ -324,7 +351,8 @@ static void _signal_job(long my_job_id)
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=SIGNALJOB ARG=%ld VALUE=URG",
 		(uint32_t) now, my_job_id);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _modify_job(long my_job_id)
@@ -343,7 +371,8 @@ static void _modify_job(long my_job_id)
 		/* "INVALID=123 " */
 		"TIMELIMIT=10 BANK=test_bank",
 		(uint32_t) now, my_job_id);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _notify_job(long my_job_id)
@@ -355,7 +384,8 @@ static void _notify_job(long my_job_id)
 		"TS=%u AUTH=root DT=CMD=NOTIFYJOB ARG=%ld "
 		"MSG=this_is_a_test",
 		(uint32_t) now, my_job_id);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _resume_job(long my_job_id)
@@ -366,7 +396,8 @@ static void _resume_job(long my_job_id)
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=RESUMEJOB ARG=%ld",
 		(uint32_t) now, my_job_id);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _job_requeue(long my_job_id)
@@ -377,7 +408,8 @@ static void _job_requeue(long my_job_id)
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=REQUEUEJOB ARG=%ld",
 		(uint32_t) now, my_job_id);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _job_will_run(long my_job_id)
@@ -389,7 +421,8 @@ static void _job_will_run(long my_job_id)
 		"TS=%u AUTH=root DT=CMD=JOBWILLRUN ARG=JOBID=%ld,%s",
 		(uint32_t) now, my_job_id,
 		"");		/* put available node list here */
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _initialize(void)
@@ -400,7 +433,8 @@ static void _initialize(void)
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=INITIALIZE ARG=USEHOSTEXP=N EPORT=%u",
 		(uint32_t) now, e_port);
-	_xmit(out_msg);
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 static void _single_msg(void)
@@ -411,8 +445,10 @@ static void _single_msg(void)
 	snprintf(out_msg, sizeof(out_msg),
 		"TS=%u AUTH=root DT=CMD=%s",
 		(uint32_t) now, 
-		"JOBWILLRUN ARG=JOBID=65537,bgl[000x733] JOBID=65539,bgl[000x733] JOBID=65538,bgl[000x733]");
-	_xmit(out_msg);
+		"JOBWILLRUN ARG=JOBID=65537,bgl[000x733] "
+		"JOBID=65539,bgl[000x733] JOBID=65538,bgl[000x733]");
+	if (_xmit(out_msg))
+		exit(1);
 }
 
 int main(int argc, char * argv[])
@@ -458,7 +494,7 @@ int main(int argc, char * argv[])
 	}
 	_cancel_job(job_id+1);
 	_job_requeue(job_id);	/* Put job back into HELD state */
-	sleep(15);
+	sleep(10);
 	_start_job(job_id);
 	_get_jobs();
 #endif
diff --git a/testsuite/expect/test7.8 b/testsuite/expect/test7.8
index 0713a576b..e170fa2f7 100755
--- a/testsuite/expect/test7.8
+++ b/testsuite/expect/test7.8
@@ -110,7 +110,7 @@ make_bash_script $file_in "
   echo BEGIN
   $bin_sleep 20
   echo FINI
-  exit 123"
+  exit 0"
 
 set job_id1 0
 set job_id2 0
diff --git a/testsuite/expect/test9.8 b/testsuite/expect/test9.8
index 2f4129174..de7eacac2 100755
--- a/testsuite/expect/test9.8
+++ b/testsuite/expect/test9.8
@@ -108,7 +108,7 @@ if {$start_cnt < $job_cnt} {
 #
 set user_name ""
 
-exec $bin_sleep [expr $delay + 6]
+exec $bin_sleep $delay
 spawn $bin_id -un
 expect {
 	-re "($alpha_numeric_under)" {
@@ -126,16 +126,18 @@ expect {
 # Determine if this is AIX (for task count, federation switch 
 # prevents each node from running more than 16 tasks)
 #
-set desired_tasks [expr $task_cnt * 2 / 3]
 if {[test_aix]} {
 	set desired_tasks 15
+} else {
+	set desired_tasks [expr $task_cnt * 2 / 3]
 }
 
-set unresponsive 1
-while { $unresponsive } {
+set matches 0
+while { $matches < $desired_tasks } {
 	log_user 0
 	set matches 0
 	set timeout 60
+	exec $bin_sleep 3
 	spawn $squeue --steps --user $user_name
 	expect {
 		-re "sleep" {
@@ -143,16 +145,25 @@ while { $unresponsive } {
 			exp_continue
 		}
 		-re "error:" {
+			send_user "\nFAILURE: squeue error\n"
+			set exit_code 1
+			exp_continue
 		}
 		timeout {
+			break
 		}
 		eof {
 			wait
-			set unresponsive 0	
+			break
 		}
 	}
+	if {$matches == 0} {
+		break
+	}
+}
+if {[test_aix]} {
+	sleep 5
 }
-if {[test_aix]} {sleep 5}
 
 log_user 1
 if {$matches < $desired_tasks} {
-- 
GitLab